1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "X86ISelLowering.h"
15 #include "MCTargetDesc/X86ShuffleDecode.h"
16 #include "X86.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86IntrinsicsInfo.h"
21 #include "X86MachineFunctionInfo.h"
22 #include "X86TargetMachine.h"
23 #include "X86TargetObjectFile.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/Analysis/BlockFrequencyInfo.h"
30 #include "llvm/Analysis/ObjCARCUtil.h"
31 #include "llvm/Analysis/ProfileSummaryInfo.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/CodeGen/IntrinsicLowering.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineJumpTableInfo.h"
38 #include "llvm/CodeGen/MachineLoopInfo.h"
39 #include "llvm/CodeGen/MachineModuleInfo.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/TargetLowering.h"
42 #include "llvm/CodeGen/WinEHFuncInfo.h"
43 #include "llvm/IR/CallingConv.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DerivedTypes.h"
46 #include "llvm/IR/EHPersonalities.h"
47 #include "llvm/IR/Function.h"
48 #include "llvm/IR/GlobalAlias.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/IRBuilder.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/PatternMatch.h"
54 #include "llvm/MC/MCAsmInfo.h"
55 #include "llvm/MC/MCContext.h"
56 #include "llvm/MC/MCExpr.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/CommandLine.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/KnownBits.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Target/TargetOptions.h"
64 #include <algorithm>
65 #include <bitset>
66 #include <cctype>
67 #include <numeric>
68 using namespace llvm;
69 
70 #define DEBUG_TYPE "x86-isel"
71 
72 static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
73     "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
74     cl::desc(
75         "Sets the preferable loop alignment for experiments (as log2 bytes) "
76         "for innermost loops only. If specified, this option overrides "
77         "alignment set by x86-experimental-pref-loop-alignment."),
78     cl::Hidden);
79 
80 static cl::opt<bool> MulConstantOptimization(
81     "mul-constant-optimization", cl::init(true),
82     cl::desc("Replace 'mul x, Const' with more effective instructions like "
83              "SHIFT, LEA, etc."),
84     cl::Hidden);
85 
86 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
87                                      const X86Subtarget &STI)
88     : TargetLowering(TM), Subtarget(STI) {
89   bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
90   MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
91 
92   // Set up the TargetLowering object.
93 
94   // X86 is weird. It always uses i8 for shift amounts and setcc results.
95   setBooleanContents(ZeroOrOneBooleanContent);
96   // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
97   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
98 
99   // For 64-bit, since we have so many registers, use the ILP scheduler.
100   // For 32-bit, use the register pressure specific scheduling.
101   // For Atom, always use ILP scheduling.
102   if (Subtarget.isAtom())
103     setSchedulingPreference(Sched::ILP);
104   else if (Subtarget.is64Bit())
105     setSchedulingPreference(Sched::ILP);
106   else
107     setSchedulingPreference(Sched::RegPressure);
108   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
109   setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
110 
111   // Bypass expensive divides and use cheaper ones.
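      // For example, addBypassSlowDiv(32, 8) lets a 32-bit divide be guarded by a
      // run-time check and done with an 8-bit divide when both operands fit in
      // 8 bits.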
112   if (TM.getOptLevel() >= CodeGenOptLevel::Default) {
113     if (Subtarget.hasSlowDivide32())
114       addBypassSlowDiv(32, 8);
115     if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
116       addBypassSlowDiv(64, 32);
117   }
118 
119   // Set up Windows compiler runtime calls.
120   if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
121     static const struct {
122       const RTLIB::Libcall Op;
123       const char * const Name;
124       const CallingConv::ID CC;
125     } LibraryCalls[] = {
126       { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
127       { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
128       { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
129       { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
130       { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
131     };
132 
133     for (const auto &LC : LibraryCalls) {
134       setLibcallName(LC.Op, LC.Name);
135       setLibcallCallingConv(LC.Op, LC.CC);
136     }
137   }
138 
139   if (Subtarget.getTargetTriple().isOSMSVCRT()) {
140     // MSVCRT doesn't have powi; fall back to pow
141     setLibcallName(RTLIB::POWI_F32, nullptr);
142     setLibcallName(RTLIB::POWI_F64, nullptr);
143   }
144 
145   if (Subtarget.canUseCMPXCHG16B())
146     setMaxAtomicSizeInBitsSupported(128);
147   else if (Subtarget.canUseCMPXCHG8B())
148     setMaxAtomicSizeInBitsSupported(64);
149   else
150     setMaxAtomicSizeInBitsSupported(32);
151 
152   setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);
153 
154   setMaxLargeFPConvertBitWidthSupported(128);
155 
156   // Set up the register classes.
157   addRegisterClass(MVT::i8, &X86::GR8RegClass);
158   addRegisterClass(MVT::i16, &X86::GR16RegClass);
159   addRegisterClass(MVT::i32, &X86::GR32RegClass);
160   if (Subtarget.is64Bit())
161     addRegisterClass(MVT::i64, &X86::GR64RegClass);
162 
163   for (MVT VT : MVT::integer_valuetypes())
164     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
165 
166   // We don't accept any truncstore of integer registers.
167   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
168   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
169   setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
170   setTruncStoreAction(MVT::i32, MVT::i16, Expand);
171   setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
172   setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
173 
174   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
175 
176   // SETOEQ and SETUNE require checking two conditions.
177   for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
178     setCondCodeAction(ISD::SETOEQ, VT, Expand);
179     setCondCodeAction(ISD::SETUNE, VT, Expand);
180   }
181 
182   // Integer absolute.
183   if (Subtarget.canUseCMOV()) {
184     setOperationAction(ISD::ABS            , MVT::i16  , Custom);
185     setOperationAction(ISD::ABS            , MVT::i32  , Custom);
186     if (Subtarget.is64Bit())
187       setOperationAction(ISD::ABS          , MVT::i64  , Custom);
188   }
189 
190   // Absolute difference.
191   for (auto Op : {ISD::ABDS, ISD::ABDU}) {
192     setOperationAction(Op                  , MVT::i8   , Custom);
193     setOperationAction(Op                  , MVT::i16  , Custom);
194     setOperationAction(Op                  , MVT::i32  , Custom);
195     if (Subtarget.is64Bit())
196      setOperationAction(Op                 , MVT::i64  , Custom);
197   }
198 
199   // Signed saturation subtraction.
200   setOperationAction(ISD::SSUBSAT          , MVT::i8   , Custom);
201   setOperationAction(ISD::SSUBSAT          , MVT::i16  , Custom);
202   setOperationAction(ISD::SSUBSAT          , MVT::i32  , Custom);
203   if (Subtarget.is64Bit())
204     setOperationAction(ISD::SSUBSAT        , MVT::i64  , Custom);
205 
206   // Funnel shifts.
207   for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
208     // For slow shld targets we only lower for code size.
209     LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;
210 
211     setOperationAction(ShiftOp             , MVT::i8   , Custom);
212     setOperationAction(ShiftOp             , MVT::i16  , Custom);
213     setOperationAction(ShiftOp             , MVT::i32  , ShiftDoubleAction);
214     if (Subtarget.is64Bit())
215       setOperationAction(ShiftOp           , MVT::i64  , ShiftDoubleAction);
216   }
217 
218   if (!Subtarget.useSoftFloat()) {
219     // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
220     // operation.
221     setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
222     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
223     setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
224     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
225     // We have an algorithm for SSE2, and we turn this into a 64-bit
226     // FILD or VCVTUSI2SS/SD for other targets.
227     setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
228     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
229     // We have an algorithm for SSE2->double, and we turn this into a
230     // 64-bit FILD followed by conditional FADD for other targets.
231     setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
232     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
233 
234     // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
235     // this operation.
236     setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
237     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
238     // SSE has no i16-to-FP conversion, only i32. We promote in the handler
239     // to allow f80 to use i16, and f64 to use i16 when only SSE1 is available.
240     setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
241     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
242     // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
243     setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
244     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
245     // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
246     // are Legal, f80 is custom lowered.
247     setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
248     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
249 
250     // Promote i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
251     // this operation.
252     setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
253     // FIXME: This doesn't generate invalid exception when it should. PR44019.
254     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
255     setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
256     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
257     setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
258     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
259     // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
260     // are Legal, f80 is custom lowered.
261     setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
262     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
263 
264     // Handle FP_TO_UINT by promoting the destination to a larger signed
265     // conversion.
266     setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
267     // FIXME: This doesn't generate invalid exception when it should. PR44019.
268     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
269     setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
270     // FIXME: This doesn't generate invalid exception when it should. PR44019.
271     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
272     setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
273     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
274     setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
275     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
276 
277     setOperationAction(ISD::LRINT,             MVT::f32, Custom);
278     setOperationAction(ISD::LRINT,             MVT::f64, Custom);
279     setOperationAction(ISD::LLRINT,            MVT::f32, Custom);
280     setOperationAction(ISD::LLRINT,            MVT::f64, Custom);
281 
282     if (!Subtarget.is64Bit()) {
283       setOperationAction(ISD::LRINT,  MVT::i64, Custom);
284       setOperationAction(ISD::LLRINT, MVT::i64, Custom);
285     }
286   }
287 
288   if (Subtarget.hasSSE2()) {
289     // Custom lowering for saturating float to int conversions.
290     // We handle promotion to larger result types manually.
291     for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
292       setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
293       setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
294     }
295     if (Subtarget.is64Bit()) {
296       setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
297       setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
298     }
299   }
300 
301   // Handle address space casts between mixed sized pointers.
302   setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
303   setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
304 
305   // TODO: when we have SSE, these could be more efficient by using movd/movq.
306   if (!Subtarget.hasSSE2()) {
307     setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
308     setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
309     if (Subtarget.is64Bit()) {
310       setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
311       // Without SSE, i64->f64 goes through memory.
312       setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
313     }
314   } else if (!Subtarget.is64Bit())
315     setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);
316 
317   // Scalar integer divide and remainder are lowered to use operations that
318   // produce two results, to match the available instructions. This exposes
319   // the two-result form to trivial CSE, which is able to combine x/y and x%y
320   // into a single instruction.
321   //
322   // Scalar integer multiply-high is also lowered to use two-result
323   // operations, to match the available instructions. However, plain multiply
324   // (low) operations are left as Legal, as there are single-result
325   // instructions for this in x86. Using the two-result multiply instructions
326   // when both high and low results are needed must be arranged by dagcombine.
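      // For example, x/y and x%y over the same operands CSE into a single
      // ISD::SDIVREM node, which selects to one division producing both results.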
327   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
328     setOperationAction(ISD::MULHS, VT, Expand);
329     setOperationAction(ISD::MULHU, VT, Expand);
330     setOperationAction(ISD::SDIV, VT, Expand);
331     setOperationAction(ISD::UDIV, VT, Expand);
332     setOperationAction(ISD::SREM, VT, Expand);
333     setOperationAction(ISD::UREM, VT, Expand);
334   }
335 
336   setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
337   setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
338   for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
339                    MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
340     setOperationAction(ISD::BR_CC,     VT, Expand);
341     setOperationAction(ISD::SELECT_CC, VT, Expand);
342   }
343   if (Subtarget.is64Bit())
344     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
345   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
346   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
347   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
348 
349   setOperationAction(ISD::FREM             , MVT::f32  , Expand);
350   setOperationAction(ISD::FREM             , MVT::f64  , Expand);
351   setOperationAction(ISD::FREM             , MVT::f80  , Expand);
352   setOperationAction(ISD::FREM             , MVT::f128 , Expand);
353 
354   if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
355     setOperationAction(ISD::GET_ROUNDING   , MVT::i32  , Custom);
356     setOperationAction(ISD::SET_ROUNDING   , MVT::Other, Custom);
357     setOperationAction(ISD::GET_FPENV_MEM  , MVT::Other, Custom);
358     setOperationAction(ISD::SET_FPENV_MEM  , MVT::Other, Custom);
359     setOperationAction(ISD::RESET_FPENV    , MVT::Other, Custom);
360   }
361 
362   // Promote the i8 variants and force them up to i32, which has a shorter
363   // encoding.
364   setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
365   setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
366   // Promote i16 as well. tzcntw has a false dependency on Intel CPUs, and for
367   // BSF we emit a REP prefix to encode it as TZCNT on modern CPUs, so it makes
368   // sense to promote that too.
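      // (CPUs without TZCNT simply ignore the REP prefix and execute a plain BSF,
      // so the REP-prefixed encoding is safe everywhere.)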
369   setOperationPromotedToType(ISD::CTTZ           , MVT::i16  , MVT::i32);
370   setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , MVT::i32);
371 
372   if (!Subtarget.hasBMI()) {
373     setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
374     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
375     if (Subtarget.is64Bit()) {
376       setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
377       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
378     }
379   }
380 
381   if (Subtarget.hasLZCNT()) {
382     // When promoting the i8 variants, force them to i32 for a shorter
383     // encoding.
384     setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
385     setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
386   } else {
387     for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
388       if (VT == MVT::i64 && !Subtarget.is64Bit())
389         continue;
390       setOperationAction(ISD::CTLZ           , VT, Custom);
391       setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
392     }
393   }
394 
395   for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
396                   ISD::STRICT_FP_TO_FP16}) {
397     // Special handling for half-precision floating point conversions.
398     // If we don't have F16C support, then lower half float conversions
399     // into library calls.
400     setOperationAction(
401         Op, MVT::f32,
402         (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
403     // There's never any support for operations beyond MVT::f32.
404     setOperationAction(Op, MVT::f64, Expand);
405     setOperationAction(Op, MVT::f80, Expand);
406     setOperationAction(Op, MVT::f128, Expand);
407   }
408 
409   for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
410     setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
411     setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
412     setTruncStoreAction(VT, MVT::f16, Expand);
413     setTruncStoreAction(VT, MVT::bf16, Expand);
414 
415     setOperationAction(ISD::BF16_TO_FP, VT, Expand);
416     setOperationAction(ISD::FP_TO_BF16, VT, Custom);
417   }
418 
419   setOperationAction(ISD::PARITY, MVT::i8, Custom);
420   setOperationAction(ISD::PARITY, MVT::i16, Custom);
421   setOperationAction(ISD::PARITY, MVT::i32, Custom);
422   if (Subtarget.is64Bit())
423     setOperationAction(ISD::PARITY, MVT::i64, Custom);
424   if (Subtarget.hasPOPCNT()) {
425     setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
426     // popcntw is longer to encode than popcntl and also has a false dependency
427     // on the destination register, which popcntl has not had since Cannon Lake.
428     setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
429   } else {
430     setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
431     setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
432     setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
433     if (Subtarget.is64Bit())
434       setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
435     else
436       setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
437   }
438 
439   setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
440 
441   if (!Subtarget.hasMOVBE())
442     setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
443 
444   // X86 wants to expand cmov itself.
445   for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
446     setOperationAction(ISD::SELECT, VT, Custom);
447     setOperationAction(ISD::SETCC, VT, Custom);
448     setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
449     setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
450   }
451   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
452     if (VT == MVT::i64 && !Subtarget.is64Bit())
453       continue;
454     setOperationAction(ISD::SELECT, VT, Custom);
455     setOperationAction(ISD::SETCC,  VT, Custom);
456   }
457 
458   // Custom action for SELECT on MMX and Expand action for SELECT_CC on MMX.
459   setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
460   setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
461 
462   setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
463   // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
464   // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
465   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
466   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
467   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
468   if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
469     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
470 
471   // Darwin ABI issue.
472   for (auto VT : { MVT::i32, MVT::i64 }) {
473     if (VT == MVT::i64 && !Subtarget.is64Bit())
474       continue;
475     setOperationAction(ISD::ConstantPool    , VT, Custom);
476     setOperationAction(ISD::JumpTable       , VT, Custom);
477     setOperationAction(ISD::GlobalAddress   , VT, Custom);
478     setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
479     setOperationAction(ISD::ExternalSymbol  , VT, Custom);
480     setOperationAction(ISD::BlockAddress    , VT, Custom);
481   }
482 
483   // 64-bit shl, sra, srl (iff 32-bit x86)
484   for (auto VT : { MVT::i32, MVT::i64 }) {
485     if (VT == MVT::i64 && !Subtarget.is64Bit())
486       continue;
487     setOperationAction(ISD::SHL_PARTS, VT, Custom);
488     setOperationAction(ISD::SRA_PARTS, VT, Custom);
489     setOperationAction(ISD::SRL_PARTS, VT, Custom);
490   }
491 
492   if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
493     setOperationAction(ISD::PREFETCH      , MVT::Other, Custom);
494 
495   setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);
496 
497   // Expand certain atomics
498   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
499     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
500     setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
501     setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
502     setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
503     setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
504     setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
505     setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
506   }
507 
508   if (!Subtarget.is64Bit())
509     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
510 
511   if (Subtarget.canUseCMPXCHG16B())
512     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
513 
514   // FIXME - use subtarget debug flags
515   if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
516       !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
517       TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
518     setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
519   }
520 
521   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
522   setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
523 
524   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
525   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
526 
527   setOperationAction(ISD::TRAP, MVT::Other, Legal);
528   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
529   if (Subtarget.isTargetPS())
530     setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
531   else
532     setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);
533 
534   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
535   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
536   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
537   bool Is64Bit = Subtarget.is64Bit();
538   setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
539   setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
540 
541   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
542   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
543 
544   setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
545 
546   // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
547   setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
548   setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
549 
550   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
551 
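      // Helper that applies one legalize action to most FP operations on an f16
      // type; used below with Promote for scalar f16 and Expand for the f16
      // vector types.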
552   auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
553     setOperationAction(ISD::FABS, VT, Action);
554     setOperationAction(ISD::FNEG, VT, Action);
555     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
556     setOperationAction(ISD::FREM, VT, Action);
557     setOperationAction(ISD::FMA, VT, Action);
558     setOperationAction(ISD::FMINNUM, VT, Action);
559     setOperationAction(ISD::FMAXNUM, VT, Action);
560     setOperationAction(ISD::FMINIMUM, VT, Action);
561     setOperationAction(ISD::FMAXIMUM, VT, Action);
562     setOperationAction(ISD::FSIN, VT, Action);
563     setOperationAction(ISD::FCOS, VT, Action);
564     setOperationAction(ISD::FSINCOS, VT, Action);
565     setOperationAction(ISD::FSQRT, VT, Action);
566     setOperationAction(ISD::FPOW, VT, Action);
567     setOperationAction(ISD::FLOG, VT, Action);
568     setOperationAction(ISD::FLOG2, VT, Action);
569     setOperationAction(ISD::FLOG10, VT, Action);
570     setOperationAction(ISD::FEXP, VT, Action);
571     setOperationAction(ISD::FEXP2, VT, Action);
572     setOperationAction(ISD::FEXP10, VT, Action);
573     setOperationAction(ISD::FCEIL, VT, Action);
574     setOperationAction(ISD::FFLOOR, VT, Action);
575     setOperationAction(ISD::FNEARBYINT, VT, Action);
576     setOperationAction(ISD::FRINT, VT, Action);
577     setOperationAction(ISD::BR_CC, VT, Action);
578     setOperationAction(ISD::SETCC, VT, Action);
579     setOperationAction(ISD::SELECT, VT, Custom);
580     setOperationAction(ISD::SELECT_CC, VT, Action);
581     setOperationAction(ISD::FROUND, VT, Action);
582     setOperationAction(ISD::FROUNDEVEN, VT, Action);
583     setOperationAction(ISD::FTRUNC, VT, Action);
584     setOperationAction(ISD::FLDEXP, VT, Action);
585   };
586 
587   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
588     // f16, f32 and f64 use SSE.
589     // Set up the FP register classes.
590     addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
591                                                      : &X86::FR16RegClass);
592     addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
593                                                      : &X86::FR32RegClass);
594     addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
595                                                      : &X86::FR64RegClass);
596 
597     // Disable f32->f64 extload as we can only generate this in one instruction
598     // under optsize. So it's easier to pattern match (fpext (load)) for that
599     // case instead of needing to emit 2 instructions for extload in the
600     // non-optsize case.
601     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
602 
603     for (auto VT : { MVT::f32, MVT::f64 }) {
604       // Use ANDPD to simulate FABS.
605       setOperationAction(ISD::FABS, VT, Custom);
606 
607       // Use XORP to simulate FNEG.
608       setOperationAction(ISD::FNEG, VT, Custom);
609 
610       // Use ANDPD and ORPD to simulate FCOPYSIGN.
611       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
612 
613       // These might be better off as horizontal vector ops.
614       setOperationAction(ISD::FADD, VT, Custom);
615       setOperationAction(ISD::FSUB, VT, Custom);
616 
617       // We don't support sin/cos/fmod
618       setOperationAction(ISD::FSIN   , VT, Expand);
619       setOperationAction(ISD::FCOS   , VT, Expand);
620       setOperationAction(ISD::FSINCOS, VT, Expand);
621     }
622 
623     // Half type will be promoted by default.
624     setF16Action(MVT::f16, Promote);
625     setOperationAction(ISD::FADD, MVT::f16, Promote);
626     setOperationAction(ISD::FSUB, MVT::f16, Promote);
627     setOperationAction(ISD::FMUL, MVT::f16, Promote);
628     setOperationAction(ISD::FDIV, MVT::f16, Promote);
629     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
630     setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
631     setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
632 
633     setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
634     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
635     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
636     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
637     setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
638     setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
639     setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
640     setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
641     setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
642     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
643     setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
644     setOperationAction(ISD::STRICT_FLDEXP, MVT::f16, Promote);
645     setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
646     setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
647     setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
648     setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
649     setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
650     setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
651     setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
652     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
653     setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
654     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
655     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
656     setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
657     setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
658     setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
659     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
660     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
661     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
662 
663     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
664     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
665 
666     // Lower this to MOVMSK plus an AND.
667     setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
668     setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
669 
670   } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
671              (UseX87 || Is64Bit)) {
672     // Use SSE for f32, x87 for f64.
673     // Set up the FP register classes.
674     addRegisterClass(MVT::f32, &X86::FR32RegClass);
675     if (UseX87)
676       addRegisterClass(MVT::f64, &X86::RFP64RegClass);
677 
678     // Use ANDPS to simulate FABS.
679     setOperationAction(ISD::FABS , MVT::f32, Custom);
680 
681     // Use XORP to simulate FNEG.
682     setOperationAction(ISD::FNEG , MVT::f32, Custom);
683 
684     if (UseX87)
685       setOperationAction(ISD::UNDEF, MVT::f64, Expand);
686 
687     // Use ANDPS and ORPS to simulate FCOPYSIGN.
688     if (UseX87)
689       setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
690     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
691 
692     // We don't support sin/cos/fmod
693     setOperationAction(ISD::FSIN   , MVT::f32, Expand);
694     setOperationAction(ISD::FCOS   , MVT::f32, Expand);
695     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
696 
697     if (UseX87) {
698       // Always expand sin/cos functions even though x87 has an instruction.
699       setOperationAction(ISD::FSIN, MVT::f64, Expand);
700       setOperationAction(ISD::FCOS, MVT::f64, Expand);
701       setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
702     }
703   } else if (UseX87) {
704     // f32 and f64 in x87.
705     // Set up the FP register classes.
706     addRegisterClass(MVT::f64, &X86::RFP64RegClass);
707     addRegisterClass(MVT::f32, &X86::RFP32RegClass);
708 
709     for (auto VT : { MVT::f32, MVT::f64 }) {
710       setOperationAction(ISD::UNDEF,     VT, Expand);
711       setOperationAction(ISD::FCOPYSIGN, VT, Expand);
712 
713       // Always expand sin/cos functions even though x87 has an instruction.
714       setOperationAction(ISD::FSIN   , VT, Expand);
715       setOperationAction(ISD::FCOS   , VT, Expand);
716       setOperationAction(ISD::FSINCOS, VT, Expand);
717     }
718   }
719 
720   // Expand FP32 immediates into loads from the stack, save special cases.
721   if (isTypeLegal(MVT::f32)) {
722     if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
723       addLegalFPImmediate(APFloat(+0.0f)); // FLD0
724       addLegalFPImmediate(APFloat(+1.0f)); // FLD1
725       addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
726       addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
727     } else // SSE immediates.
728       addLegalFPImmediate(APFloat(+0.0f)); // xorps
729   }
730   // Expand FP64 immediates into loads from the stack, save special cases.
731   if (isTypeLegal(MVT::f64)) {
732     if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
733       addLegalFPImmediate(APFloat(+0.0)); // FLD0
734       addLegalFPImmediate(APFloat(+1.0)); // FLD1
735       addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
736       addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
737     } else // SSE immediates.
738       addLegalFPImmediate(APFloat(+0.0)); // xorpd
739   }
740   // Support fp16 0 immediate.
741   if (isTypeLegal(MVT::f16))
742     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));
743 
744   // Handle constrained floating-point operations on scalars.
745   setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
746   setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
747   setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
748   setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
749   setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
750   setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
751   setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
752   setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
753   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
754   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
755   setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
756   setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);
757 
758   // We don't support FMA.
759   setOperationAction(ISD::FMA, MVT::f64, Expand);
760   setOperationAction(ISD::FMA, MVT::f32, Expand);
761 
762   // f80 always uses X87.
763   if (UseX87) {
764     addRegisterClass(MVT::f80, &X86::RFP80RegClass);
765     setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
766     setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
767     {
768       APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
769       addLegalFPImmediate(TmpFlt);  // FLD0
770       TmpFlt.changeSign();
771       addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
772 
773       bool ignored;
774       APFloat TmpFlt2(+1.0);
775       TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
776                       &ignored);
777       addLegalFPImmediate(TmpFlt2);  // FLD1
778       TmpFlt2.changeSign();
779       addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
780     }
781 
782     // Always expand sin/cos functions even though x87 has an instruction.
783     setOperationAction(ISD::FSIN   , MVT::f80, Expand);
784     setOperationAction(ISD::FCOS   , MVT::f80, Expand);
785     setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
786 
787     setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
788     setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
789     setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
790     setOperationAction(ISD::FRINT,  MVT::f80, Expand);
791     setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
792     setOperationAction(ISD::FROUNDEVEN, MVT::f80, Expand);
793     setOperationAction(ISD::FMA, MVT::f80, Expand);
794     setOperationAction(ISD::LROUND, MVT::f80, Expand);
795     setOperationAction(ISD::LLROUND, MVT::f80, Expand);
796     setOperationAction(ISD::LRINT, MVT::f80, Custom);
797     setOperationAction(ISD::LLRINT, MVT::f80, Custom);
798 
799     // Handle constrained floating-point operations on scalars.
800     setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
801     setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
802     setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
803     setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
804     setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
805     if (isTypeLegal(MVT::f16)) {
806       setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
807       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
808     } else {
809       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
810     }
811     // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
812     // as Custom.
813     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
814   }
815 
816   // f128 uses xmm registers, but most operations require libcalls.
817   if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
818     addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
819                                                    : &X86::VR128RegClass);
820 
821     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
822 
823     setOperationAction(ISD::FADD,        MVT::f128, LibCall);
824     setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
825     setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
826     setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
827     setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
828     setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
829     setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
830     setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
831     setOperationAction(ISD::FMA,         MVT::f128, LibCall);
832     setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);
833 
834     setOperationAction(ISD::FABS, MVT::f128, Custom);
835     setOperationAction(ISD::FNEG, MVT::f128, Custom);
836     setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
837 
838     setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
839     setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
840     setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
841     setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
842     setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
843     // No STRICT_FSINCOS
844     setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
845     setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
846 
847     setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
848     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
849     // We need to custom handle any FP_ROUND with an f128 input, but
850     // LegalizeDAG uses the result type to know when to run a custom handler.
851     // So we have to list all legal floating point result types here.
852     if (isTypeLegal(MVT::f32)) {
853       setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
854       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
855     }
856     if (isTypeLegal(MVT::f64)) {
857       setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
858       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
859     }
860     if (isTypeLegal(MVT::f80)) {
861       setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
862       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
863     }
864 
865     setOperationAction(ISD::SETCC, MVT::f128, Custom);
866 
867     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
868     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
869     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
870     setTruncStoreAction(MVT::f128, MVT::f32, Expand);
871     setTruncStoreAction(MVT::f128, MVT::f64, Expand);
872     setTruncStoreAction(MVT::f128, MVT::f80, Expand);
873   }
874 
875   // Always use a library call for pow.
876   setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
877   setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
878   setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
879   setOperationAction(ISD::FPOW             , MVT::f128 , Expand);
880 
881   setOperationAction(ISD::FLOG, MVT::f80, Expand);
882   setOperationAction(ISD::FLOG2, MVT::f80, Expand);
883   setOperationAction(ISD::FLOG10, MVT::f80, Expand);
884   setOperationAction(ISD::FEXP, MVT::f80, Expand);
885   setOperationAction(ISD::FEXP2, MVT::f80, Expand);
886   setOperationAction(ISD::FEXP10, MVT::f80, Expand);
887   setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
888   setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
889 
890   // Some FP actions are always expanded for vector types.
891   for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
892                    MVT::v4f32, MVT::v8f32,  MVT::v16f32,
893                    MVT::v2f64, MVT::v4f64,  MVT::v8f64 }) {
894     setOperationAction(ISD::FSIN,      VT, Expand);
895     setOperationAction(ISD::FSINCOS,   VT, Expand);
896     setOperationAction(ISD::FCOS,      VT, Expand);
897     setOperationAction(ISD::FREM,      VT, Expand);
898     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
899     setOperationAction(ISD::FPOW,      VT, Expand);
900     setOperationAction(ISD::FLOG,      VT, Expand);
901     setOperationAction(ISD::FLOG2,     VT, Expand);
902     setOperationAction(ISD::FLOG10,    VT, Expand);
903     setOperationAction(ISD::FEXP,      VT, Expand);
904     setOperationAction(ISD::FEXP2,     VT, Expand);
905     setOperationAction(ISD::FEXP10,    VT, Expand);
906   }
907 
908   // First set operation action for all vector types to either promote
909   // (for widening) or expand (for scalarization). Then we will selectively
910   // turn on ones that can be effectively codegen'd.
911   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
912     setOperationAction(ISD::SDIV, VT, Expand);
913     setOperationAction(ISD::UDIV, VT, Expand);
914     setOperationAction(ISD::SREM, VT, Expand);
915     setOperationAction(ISD::UREM, VT, Expand);
916     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
917     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
918     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
919     setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
920     setOperationAction(ISD::FMA,  VT, Expand);
921     setOperationAction(ISD::FFLOOR, VT, Expand);
922     setOperationAction(ISD::FCEIL, VT, Expand);
923     setOperationAction(ISD::FTRUNC, VT, Expand);
924     setOperationAction(ISD::FRINT, VT, Expand);
925     setOperationAction(ISD::FNEARBYINT, VT, Expand);
926     setOperationAction(ISD::FROUNDEVEN, VT, Expand);
927     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
928     setOperationAction(ISD::MULHS, VT, Expand);
929     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
930     setOperationAction(ISD::MULHU, VT, Expand);
931     setOperationAction(ISD::SDIVREM, VT, Expand);
932     setOperationAction(ISD::UDIVREM, VT, Expand);
933     setOperationAction(ISD::CTPOP, VT, Expand);
934     setOperationAction(ISD::CTTZ, VT, Expand);
935     setOperationAction(ISD::CTLZ, VT, Expand);
936     setOperationAction(ISD::ROTL, VT, Expand);
937     setOperationAction(ISD::ROTR, VT, Expand);
938     setOperationAction(ISD::BSWAP, VT, Expand);
939     setOperationAction(ISD::SETCC, VT, Expand);
940     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
941     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
942     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
943     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
944     setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
945     setOperationAction(ISD::TRUNCATE, VT, Expand);
946     setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
947     setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
948     setOperationAction(ISD::ANY_EXTEND, VT, Expand);
949     setOperationAction(ISD::SELECT_CC, VT, Expand);
950     for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
951       setTruncStoreAction(InnerVT, VT, Expand);
952 
953       setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
954       setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
955 
956       // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
957       // types; we have to deal with them whether we ask for Expansion or not.
958       // Setting Expand causes its own optimisation problems though, so leave
959       // them legal.
960       if (VT.getVectorElementType() == MVT::i1)
961         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
962 
963       // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
964       // split/scalarized right now.
965       if (VT.getVectorElementType() == MVT::f16 ||
966           VT.getVectorElementType() == MVT::bf16)
967         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
968     }
969   }
970 
971   // FIXME: To prevent SSE instructions from being expanded to MMX ones with
972   // -msoft-float, disable use of MMX as well.
973   if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
974     addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
975     // No operations on x86mmx are supported; everything uses intrinsics.
976   }
977 
978   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
979     addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
980                                                     : &X86::VR128RegClass);
981 
982     setOperationAction(ISD::FMAXIMUM,           MVT::f32, Custom);
983     setOperationAction(ISD::FMINIMUM,           MVT::f32, Custom);
984 
985     setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
986     setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
987     setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
988     setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
989     setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
990     setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
991     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
992     setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
993 
994     setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
995     setOperationAction(ISD::STORE,              MVT::v2f32, Custom);
996 
997     setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
998     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
999     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
1000     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
1001     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
1002   }
1003 
1004   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
1005     addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1006                                                     : &X86::VR128RegClass);
1007 
1008     // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
1009     // registers cannot be used even for integer operations.
1010     addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
1011                                                     : &X86::VR128RegClass);
1012     addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1013                                                     : &X86::VR128RegClass);
1014     addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1015                                                     : &X86::VR128RegClass);
1016     addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1017                                                     : &X86::VR128RegClass);
1018     addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1019                                                     : &X86::VR128RegClass);
1020 
1021     for (auto VT : { MVT::f64, MVT::v4f32, MVT::v2f64 }) {
1022       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1023       setOperationAction(ISD::FMINIMUM, VT, Custom);
1024     }
1025 
1026     for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
1027                      MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
1028       setOperationAction(ISD::SDIV, VT, Custom);
1029       setOperationAction(ISD::SREM, VT, Custom);
1030       setOperationAction(ISD::UDIV, VT, Custom);
1031       setOperationAction(ISD::UREM, VT, Custom);
1032     }
1033 
1034     setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
1035     setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
1036     setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);
1037 
1038     setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
1039     setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
1040     setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
1041     setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
1042     setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
1043     setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
1044     setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
1045     setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
1046     setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
1047     setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
1048     setOperationAction(ISD::AVGCEILU,           MVT::v16i8, Legal);
1049     setOperationAction(ISD::AVGCEILU,           MVT::v8i16, Legal);
1050 
1051     setOperationAction(ISD::SMULO,              MVT::v16i8, Custom);
1052     setOperationAction(ISD::UMULO,              MVT::v16i8, Custom);
1053     setOperationAction(ISD::UMULO,              MVT::v2i32, Custom);
1054 
1055     setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
1056     setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
1057     setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
1058 
1059     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1060       setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
1061       setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
1062       setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
1063       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
1064     }
1065 
1066     setOperationAction(ISD::ABDU,               MVT::v16i8, Custom);
1067     setOperationAction(ISD::ABDS,               MVT::v16i8, Custom);
1068     setOperationAction(ISD::ABDU,               MVT::v8i16, Custom);
1069     setOperationAction(ISD::ABDS,               MVT::v8i16, Custom);
1070     setOperationAction(ISD::ABDU,               MVT::v4i32, Custom);
1071     setOperationAction(ISD::ABDS,               MVT::v4i32, Custom);
1072 
1073     setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
1074     setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
1075     setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
1076     setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
1077     setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
1078     setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
1079     setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
1080     setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
1081     setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
1082     setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
1083 
1084     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1085     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
1086     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
1087     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
1088 
1089     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1090       setOperationAction(ISD::SETCC,              VT, Custom);
1091       setOperationAction(ISD::CTPOP,              VT, Custom);
1092       setOperationAction(ISD::ABS,                VT, Custom);
1093 
1094       // The condition codes aren't legal in SSE/AVX, and under AVX512 we use
1095       // setcc all the way to isel and prefer SETGT in some isel patterns.
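           // For example, an integer SETLT is handled by swapping the operands
           // and emitting SETGT, which maps directly onto PCMPGT.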
1096       setCondCodeAction(ISD::SETLT, VT, Custom);
1097       setCondCodeAction(ISD::SETLE, VT, Custom);
1098     }
1099 
1100     setOperationAction(ISD::SETCC,          MVT::v2f64, Custom);
1101     setOperationAction(ISD::SETCC,          MVT::v4f32, Custom);
1102     setOperationAction(ISD::STRICT_FSETCC,  MVT::v2f64, Custom);
1103     setOperationAction(ISD::STRICT_FSETCC,  MVT::v4f32, Custom);
1104     setOperationAction(ISD::STRICT_FSETCCS, MVT::v2f64, Custom);
1105     setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f32, Custom);
1106 
1107     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1108       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1109       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1110       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1111       setOperationAction(ISD::VSELECT,            VT, Custom);
1112       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1113     }
1114 
1115     for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
1116       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1117       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1118       setOperationAction(ISD::VSELECT,            VT, Custom);
1119 
1120       if (VT == MVT::v2i64 && !Subtarget.is64Bit())
1121         continue;
1122 
1123       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1124       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1125     }
1126     setF16Action(MVT::v8f16, Expand);
1127     setOperationAction(ISD::FADD, MVT::v8f16, Expand);
1128     setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
1129     setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
1130     setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
1131     setOperationAction(ISD::FNEG, MVT::v8f16, Custom);
1132     setOperationAction(ISD::FABS, MVT::v8f16, Custom);
1133     setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Custom);
1134 
1135     // Custom lower v2i64 and v2f64 selects.
1136     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
1137     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
1138     setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
1139     setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
1140     setOperationAction(ISD::SELECT,             MVT::v8f16, Custom);
1141     setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
1142 
1143     setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Custom);
1144     setOperationAction(ISD::FP_TO_UINT,         MVT::v4i32, Custom);
1145     setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
1146     setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1147     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Custom);
1148     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
1149 
1150     // Custom legalize these to avoid over promotion or custom promotion.
1151     for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
1152       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1153       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1154       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1155       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1156     }
1157 
1158     setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Custom);
1159     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Custom);
1160     setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
1161     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
1162 
1163     setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
1164     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
1165 
1166     setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
1167     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
1168 
1169     // Custom v2f32 conversions, including a fast UINT_TO_FP( v2i32 ) path.
1170     setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
1171     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
1172     setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
1173     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
1174 
1175     setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
1176     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
1177     setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
1178     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1179 
1180     // We want to legalize this to an f64 load rather than an i64 load on
1181     // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1182     // store.
1183     setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1184     setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1185     setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1186     setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1187     setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1188     setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
1189 
1190     // Add 32-bit vector stores to help vectorization opportunities.
1191     setOperationAction(ISD::STORE,              MVT::v2i16, Custom);
1192     setOperationAction(ISD::STORE,              MVT::v4i8,  Custom);
1193 
1194     setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1195     setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1196     setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1197     if (!Subtarget.hasAVX512())
1198       setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1199 
1200     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1201     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1202     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1203 
1204     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1205 
1206     setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1207     setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1208     setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1209     setOperationAction(ISD::TRUNCATE,    MVT::v2i64, Custom);
1210     setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1211     setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1212     setOperationAction(ISD::TRUNCATE,    MVT::v4i32, Custom);
1213     setOperationAction(ISD::TRUNCATE,    MVT::v4i64, Custom);
1214     setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1215     setOperationAction(ISD::TRUNCATE,    MVT::v8i16, Custom);
1216     setOperationAction(ISD::TRUNCATE,    MVT::v8i32, Custom);
1217     setOperationAction(ISD::TRUNCATE,    MVT::v8i64, Custom);
1218     setOperationAction(ISD::TRUNCATE,    MVT::v16i8, Custom);
1219     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Custom);
1220     setOperationAction(ISD::TRUNCATE,    MVT::v16i32, Custom);
1221     setOperationAction(ISD::TRUNCATE,    MVT::v16i64, Custom);
1222 
1223     // In the customized shift lowering, the v4i32/v2i64 cases that are legal
1224     // under AVX2 will be recognized.
1225     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1226       setOperationAction(ISD::SRL,              VT, Custom);
1227       setOperationAction(ISD::SHL,              VT, Custom);
1228       setOperationAction(ISD::SRA,              VT, Custom);
1229       if (VT == MVT::v2i64) continue;
1230       setOperationAction(ISD::ROTL,             VT, Custom);
1231       setOperationAction(ISD::ROTR,             VT, Custom);
1232       setOperationAction(ISD::FSHL,             VT, Custom);
1233       setOperationAction(ISD::FSHR,             VT, Custom);
1234     }
1235 
1236     setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1237     setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1238     setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1239     setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1240     setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1241   }
1242 
1243   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1244     setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1245     setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1246     setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1247     setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1248     setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1249     setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1250     setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1251     setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1252 
1253     // These might be better off as horizontal vector ops.
1254     setOperationAction(ISD::ADD,                MVT::i16, Custom);
1255     setOperationAction(ISD::ADD,                MVT::i32, Custom);
1256     setOperationAction(ISD::SUB,                MVT::i16, Custom);
1257     setOperationAction(ISD::SUB,                MVT::i32, Custom);
1258   }
1259 
1260   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1261     for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1262       setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1263       setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1264       setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1265       setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1266       setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1267       setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1268       setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1269       setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1270       setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1271       setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1272       setOperationAction(ISD::FROUNDEVEN,        RoundedTy,  Legal);
1273       setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy,  Legal);
1274 
1275       setOperationAction(ISD::FROUND,            RoundedTy,  Custom);
1276     }
1277 
1278     setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1279     setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1280     setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1281     setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1282     setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1283     setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1284     setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1285     setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1286 
1287     for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
1288       setOperationAction(ISD::ABDS,             VT, Custom);
1289       setOperationAction(ISD::ABDU,             VT, Custom);
1290     }
1291 
1292     setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
1293     setOperationAction(ISD::SADDSAT,            MVT::v2i64, Custom);
1294     setOperationAction(ISD::SSUBSAT,            MVT::v2i64, Custom);
1295 
1296     // FIXME: Do we need to handle scalar-to-vector here?
1297     setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1298     setOperationAction(ISD::SMULO,              MVT::v2i32, Custom);
1299 
1300     // We directly match byte blends in the backend as they match the VSELECT
1301     // condition form.
1302     setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1303 
1304     // SSE41 brings specific instructions for doing vector sign extend even in
1305     // cases where we don't have SRA.
1306     for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1307       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1308       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1309     }
1310 
1311     // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
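         // (For example, a sign-extending load from <4 x i8> memory into a v4i32
         // value can be selected directly to PMOVSXBD.)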
1312     for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1313       setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1314       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1315       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1316       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1317       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1318       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1319     }
1320 
1321     if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1322       // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1323       // do the pre and post work in the vector domain.
1324       setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1325       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1326       // We need to mark SINT_TO_FP as Custom even though we want to expand it
1327       // so that DAG combine doesn't try to turn it into uint_to_fp.
1328       setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1329       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1330     }
1331   }
1332 
1333   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
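         // v2i64 UADDSAT is gated on SSE4.2, presumably because PCMPGTQ (new in
         // SSE4.2) provides the 64-bit vector compare its expansion needs.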
1334     setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
1335   }
1336 
1337   if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
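         // XOP provides variable vector rotates (VPROTB/W/D/Q), which the Custom
         // ROTL/ROTR lowering is able to use.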
1338     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1339                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1340       setOperationAction(ISD::ROTL, VT, Custom);
1341       setOperationAction(ISD::ROTR, VT, Custom);
1342     }
1343 
1344     // XOP can efficiently perform BITREVERSE with VPPERM.
1345     for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1346       setOperationAction(ISD::BITREVERSE, VT, Custom);
1347 
1348     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1349                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1350       setOperationAction(ISD::BITREVERSE, VT, Custom);
1351   }
1352 
1353   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1354     bool HasInt256 = Subtarget.hasInt256();
1355 
1356     addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1357                                                      : &X86::VR256RegClass);
1358     addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1359                                                      : &X86::VR256RegClass);
1360     addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1361                                                      : &X86::VR256RegClass);
1362     addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1363                                                      : &X86::VR256RegClass);
1364     addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1365                                                      : &X86::VR256RegClass);
1366     addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1367                                                      : &X86::VR256RegClass);
1368     addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1369                                                      : &X86::VR256RegClass);
1370 
1371     for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1372       setOperationAction(ISD::FFLOOR,            VT, Legal);
1373       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1374       setOperationAction(ISD::FCEIL,             VT, Legal);
1375       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1376       setOperationAction(ISD::FTRUNC,            VT, Legal);
1377       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1378       setOperationAction(ISD::FRINT,             VT, Legal);
1379       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1380       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1381       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1382       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1383       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1384 
1385       setOperationAction(ISD::FROUND,            VT, Custom);
1386 
1387       setOperationAction(ISD::FNEG,              VT, Custom);
1388       setOperationAction(ISD::FABS,              VT, Custom);
1389       setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1390 
1391       setOperationAction(ISD::FMAXIMUM,          VT, Custom);
1392       setOperationAction(ISD::FMINIMUM,          VT, Custom);
1393     }
1394 
1395     // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1396     // even though v8i16 is a legal type.
1397     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1398     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1399     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1400     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1401     setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Custom);
1402     setOperationAction(ISD::FP_TO_UINT,                MVT::v8i32, Custom);
1403     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Custom);
1404 
1405     setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Custom);
1406     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Custom);
1407     setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Expand);
1408     setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Expand);
1409     setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
1410     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Custom);
1411 
1412     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1413     setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1414     setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1415     setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1416     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1417     setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1418     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1419     setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1420     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1421     setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1422     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1423 
1424     if (!Subtarget.hasAVX512())
1425       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1426 
1427     // In the customized shift lowering, the v8i32/v4i64 cases that are legal
1428     // under AVX2 will be recognized.
1429     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1430       setOperationAction(ISD::SRL,             VT, Custom);
1431       setOperationAction(ISD::SHL,             VT, Custom);
1432       setOperationAction(ISD::SRA,             VT, Custom);
1433       setOperationAction(ISD::ABDS,            VT, Custom);
1434       setOperationAction(ISD::ABDU,            VT, Custom);
1435       if (VT == MVT::v4i64) continue;
1436       setOperationAction(ISD::ROTL,            VT, Custom);
1437       setOperationAction(ISD::ROTR,            VT, Custom);
1438       setOperationAction(ISD::FSHL,            VT, Custom);
1439       setOperationAction(ISD::FSHR,            VT, Custom);
1440     }
1441 
1442     // These types need custom splitting if their input is a 128-bit vector.
1443     setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1444     setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1445     setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1446     setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1447 
1448     setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1449     setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1450     setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1451     setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1452     setOperationAction(ISD::SELECT,            MVT::v16f16, Custom);
1453     setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1454     setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1455 
1456     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1457       setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1458       setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1459       setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1460     }
1461 
1462     setOperationAction(ISD::TRUNCATE,          MVT::v32i8, Custom);
1463     setOperationAction(ISD::TRUNCATE,          MVT::v32i16, Custom);
1464     setOperationAction(ISD::TRUNCATE,          MVT::v32i32, Custom);
1465     setOperationAction(ISD::TRUNCATE,          MVT::v32i64, Custom);
1466 
1467     setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1468 
1469     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1470       setOperationAction(ISD::SETCC,           VT, Custom);
1471       setOperationAction(ISD::CTPOP,           VT, Custom);
1472       setOperationAction(ISD::CTLZ,            VT, Custom);
1473 
1474       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1475       // setcc all the way to isel and prefer SETGT in some isel patterns.
1476       setCondCodeAction(ISD::SETLT, VT, Custom);
1477       setCondCodeAction(ISD::SETLE, VT, Custom);
1478     }
1479 
1480     setOperationAction(ISD::SETCC,          MVT::v4f64, Custom);
1481     setOperationAction(ISD::SETCC,          MVT::v8f32, Custom);
1482     setOperationAction(ISD::STRICT_FSETCC,  MVT::v4f64, Custom);
1483     setOperationAction(ISD::STRICT_FSETCC,  MVT::v8f32, Custom);
1484     setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f64, Custom);
1485     setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f32, Custom);
1486 
1487     if (Subtarget.hasAnyFMA()) {
1488       for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1489                        MVT::v2f64, MVT::v4f64 }) {
1490         setOperationAction(ISD::FMA, VT, Legal);
1491         setOperationAction(ISD::STRICT_FMA, VT, Legal);
1492       }
1493     }
1494 
1495     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1496       setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1497       setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1498     }
1499 
1500     setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1501     setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1502     setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1503     setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1504 
1505     setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1506     setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1507     setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1508     setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1509     setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1510     setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1511     setOperationAction(ISD::AVGCEILU,  MVT::v16i16, HasInt256 ? Legal : Custom);
1512     setOperationAction(ISD::AVGCEILU,  MVT::v32i8,  HasInt256 ? Legal : Custom);
1513 
1514     setOperationAction(ISD::SMULO,     MVT::v32i8, Custom);
1515     setOperationAction(ISD::UMULO,     MVT::v32i8, Custom);
1516 
1517     setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1518     setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1519     setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1520     setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1521     setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1522 
1523     setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1524     setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1525     setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1526     setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1527     setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1528     setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1529     setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1530     setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1531     setOperationAction(ISD::UADDSAT,   MVT::v8i32, Custom);
1532     setOperationAction(ISD::USUBSAT,   MVT::v8i32, Custom);
1533     setOperationAction(ISD::UADDSAT,   MVT::v4i64, Custom);
1534     setOperationAction(ISD::USUBSAT,   MVT::v4i64, Custom);
1535 
1536     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1537       setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1538       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1539       setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1540       setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1541       setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1542     }
1543 
1544     for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1545       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1546       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1547     }
1548 
1549     if (HasInt256) {
1550       // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1551       // when we have a 256-bit-wide blend with immediate.
1552       setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1553       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1554 
1555       // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1556       for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1557         setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1558         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1559         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1560         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1561         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1562         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1563       }
1564     }
1565 
1566     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1567                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1568       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1569       setOperationAction(ISD::MSTORE, VT, Legal);
1570     }
1571 
1572     // Extract subvector is special because the value type
1573     // (result) is 128-bit but the source is 256-bit wide.
1574     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1575                      MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1576       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1577     }
1578 
1579     // Custom lower several nodes for 256-bit types.
1580     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1581                     MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1582       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1583       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1584       setOperationAction(ISD::VSELECT,            VT, Custom);
1585       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1586       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1587       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1588       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1589       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1590       setOperationAction(ISD::STORE,              VT, Custom);
1591     }
1592     setF16Action(MVT::v16f16, Expand);
1593     setOperationAction(ISD::FNEG, MVT::v16f16, Custom);
1594     setOperationAction(ISD::FABS, MVT::v16f16, Custom);
1595     setOperationAction(ISD::FCOPYSIGN, MVT::v16f16, Custom);
1596     setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1597     setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1598     setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1599     setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1600 
1601     if (HasInt256) {
1602       setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1603 
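           // AVX2 introduces hardware gathers (VPGATHERDD, VGATHERDPS, etc.), which
           // the custom MGATHER lowering can target.
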
1604       // Custom legalize 2x32 to get a little better code.
1605       setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1606       setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1607 
1608       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1609                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1610         setOperationAction(ISD::MGATHER,  VT, Custom);
1611     }
1612   }
1613 
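       // This block handles targets with the F16C conversions (VCVTPH2PS /
       // VCVTPS2PH) but no full FP16 arithmetic: f16 rounds/extends are custom
       // lowered and f16 vector arithmetic is promoted to f32.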
1614   if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1615       Subtarget.hasF16C()) {
1616     for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1617       setOperationAction(ISD::FP_ROUND,           VT, Custom);
1618       setOperationAction(ISD::STRICT_FP_ROUND,    VT, Custom);
1619     }
1620     for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32, MVT::v8f32 }) {
1621       setOperationAction(ISD::FP_EXTEND,          VT, Custom);
1622       setOperationAction(ISD::STRICT_FP_EXTEND,   VT, Custom);
1623     }
1624     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1625       setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1626       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1627     }
1628   }
1629 
1630   // This block controls legalization of the mask vector sizes that are
1631   // available with AVX512. 512-bit vectors are in a separate block controlled
1632   // by useAVX512Regs.
1633   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1634     addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1635     addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1636     addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1637     addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1638     addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1639 
1640     setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1641     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1642     setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1643 
1644     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1645     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1646     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1647     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1648     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1649     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1650     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1651     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1652     setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1653     setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1654     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1655     setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1656 
1657     // There is no byte sized k-register load or store without AVX512DQ.
1658     if (!Subtarget.hasDQI()) {
1659       setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1660       setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1661       setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1662       setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1663 
1664       setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1665       setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1666       setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1667       setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1668     }
1669 
1670     // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1671     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1672       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1673       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1674       setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1675     }
1676 
1677     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1678       setOperationAction(ISD::VSELECT,          VT, Expand);
1679 
1680     for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1681       setOperationAction(ISD::SETCC,            VT, Custom);
1682       setOperationAction(ISD::SELECT,           VT, Custom);
1683       setOperationAction(ISD::TRUNCATE,         VT, Custom);
1684 
1685       setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1686       setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1687       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1688       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1689       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1690       setOperationAction(ISD::VECTOR_SHUFFLE,   VT,  Custom);
1691     }
1692 
1693     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1694       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1695   }
1696 
1697   // This block controls legalization for 512-bit operations with 8/16/32/64-bit
1698   // elements. 512-bit operations can be disabled based on the prefer-vector-width
1699   // and required-vector-width function attributes.
1700   if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1701     bool HasBWI = Subtarget.hasBWI();
1702 
1703     addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1704     addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1705     addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1706     addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1707     addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1708     addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1709     addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1710 
1711     for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1712       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1713       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1714       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1715       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1716       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1717       if (HasBWI)
1718         setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1719     }
1720 
1721     for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1722       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1723       setOperationAction(ISD::FMINIMUM, VT, Custom);
1724       setOperationAction(ISD::FNEG,  VT, Custom);
1725       setOperationAction(ISD::FABS,  VT, Custom);
1726       setOperationAction(ISD::FMA,   VT, Legal);
1727       setOperationAction(ISD::STRICT_FMA, VT, Legal);
1728       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1729     }
1730 
1731     for (MVT VT : { MVT::v16i1, MVT::v16i8 }) {
1732       setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1733       setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1734       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1735       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1736     }
1737 
1738     for (MVT VT : { MVT::v16i16, MVT::v16i32 }) {
1739       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1740       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1741       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1742       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1743     }
1744 
1745     setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Custom);
1746     setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Custom);
1747     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1748     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1749     setOperationAction(ISD::FP_EXTEND,         MVT::v8f64,  Custom);
1750     setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v8f64,  Custom);
1751 
1752     setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1753     setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1754     setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1755     setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1756     setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1757     setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1758     setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1759     setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1760     setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1761     setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1762     setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1763 
1764     setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1765     setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1766     setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1767     setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1768     setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1769     if (HasBWI)
1770       setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1771 
1772     // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1773     // to 512-bit rather than use the AVX2 instructions so that we can use
1774     // k-masks.
1775     if (!Subtarget.hasVLX()) {
1776       for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1777            MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1778         setOperationAction(ISD::MLOAD,  VT, Custom);
1779         setOperationAction(ISD::MSTORE, VT, Custom);
1780       }
1781     }
1782 
1783     setOperationAction(ISD::TRUNCATE,    MVT::v8i32,  Legal);
1784     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Legal);
1785     setOperationAction(ISD::TRUNCATE,    MVT::v32i8,  HasBWI ? Legal : Custom);
1786     setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1787     setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1788     setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64,  Custom);
1789     setOperationAction(ISD::ANY_EXTEND,  MVT::v32i16, Custom);
1790     setOperationAction(ISD::ANY_EXTEND,  MVT::v16i32, Custom);
1791     setOperationAction(ISD::ANY_EXTEND,  MVT::v8i64,  Custom);
1792     setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1793     setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1794     setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64,  Custom);
1795 
1796     if (HasBWI) {
1797       // Extends from v64i1 masks to 512-bit vectors.
1798       setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1799       setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1800       setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1801     }
1802 
1803     for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1804       setOperationAction(ISD::FFLOOR,            VT, Legal);
1805       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1806       setOperationAction(ISD::FCEIL,             VT, Legal);
1807       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1808       setOperationAction(ISD::FTRUNC,            VT, Legal);
1809       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1810       setOperationAction(ISD::FRINT,             VT, Legal);
1811       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1812       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1813       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1814       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1815       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1816 
1817       setOperationAction(ISD::FROUND,            VT, Custom);
1818     }
1819 
1820     for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1821       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1822       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1823     }
1824 
1825     setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1826     setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1827     setOperationAction(ISD::ADD, MVT::v64i8,  HasBWI ? Legal : Custom);
1828     setOperationAction(ISD::SUB, MVT::v64i8,  HasBWI ? Legal : Custom);
1829 
1830     setOperationAction(ISD::MUL, MVT::v8i64,  Custom);
1831     setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1832     setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1833     setOperationAction(ISD::MUL, MVT::v64i8,  Custom);
1834 
1835     setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1836     setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1837     setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1838     setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1839     setOperationAction(ISD::MULHS, MVT::v64i8,  Custom);
1840     setOperationAction(ISD::MULHU, MVT::v64i8,  Custom);
1841     setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1842     setOperationAction(ISD::AVGCEILU, MVT::v64i8,  HasBWI ? Legal : Custom);
1843 
1844     setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1845     setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1846 
1847     setOperationAction(ISD::BITREVERSE, MVT::v64i8,  Custom);
1848 
1849     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1850       setOperationAction(ISD::SRL,              VT, Custom);
1851       setOperationAction(ISD::SHL,              VT, Custom);
1852       setOperationAction(ISD::SRA,              VT, Custom);
1853       setOperationAction(ISD::ROTL,             VT, Custom);
1854       setOperationAction(ISD::ROTR,             VT, Custom);
1855       setOperationAction(ISD::SETCC,            VT, Custom);
1856       setOperationAction(ISD::ABDS,             VT, Custom);
1857       setOperationAction(ISD::ABDU,             VT, Custom);
1858 
1859       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1860       // setcc all the way to isel and prefer SETGT in some isel patterns.
1861       setCondCodeAction(ISD::SETLT, VT, Custom);
1862       setCondCodeAction(ISD::SETLE, VT, Custom);
1863     }
1864 
1865     setOperationAction(ISD::SETCC,          MVT::v8f64, Custom);
1866     setOperationAction(ISD::SETCC,          MVT::v16f32, Custom);
1867     setOperationAction(ISD::STRICT_FSETCC,  MVT::v8f64, Custom);
1868     setOperationAction(ISD::STRICT_FSETCC,  MVT::v16f32, Custom);
1869     setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f64, Custom);
1870     setOperationAction(ISD::STRICT_FSETCCS, MVT::v16f32, Custom);
1871 
1872     for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1873       setOperationAction(ISD::SMAX,             VT, Legal);
1874       setOperationAction(ISD::UMAX,             VT, Legal);
1875       setOperationAction(ISD::SMIN,             VT, Legal);
1876       setOperationAction(ISD::UMIN,             VT, Legal);
1877       setOperationAction(ISD::ABS,              VT, Legal);
1878       setOperationAction(ISD::CTPOP,            VT, Custom);
1879     }
1880 
1881     for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1882       setOperationAction(ISD::ABS,     VT, HasBWI ? Legal : Custom);
1883       setOperationAction(ISD::CTPOP,   VT, Subtarget.hasBITALG() ? Legal : Custom);
1884       setOperationAction(ISD::CTLZ,    VT, Custom);
1885       setOperationAction(ISD::SMAX,    VT, HasBWI ? Legal : Custom);
1886       setOperationAction(ISD::UMAX,    VT, HasBWI ? Legal : Custom);
1887       setOperationAction(ISD::SMIN,    VT, HasBWI ? Legal : Custom);
1888       setOperationAction(ISD::UMIN,    VT, HasBWI ? Legal : Custom);
1889       setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1890       setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1891       setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1892       setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1893     }
1894 
1895     setOperationAction(ISD::FSHL,       MVT::v64i8, Custom);
1896     setOperationAction(ISD::FSHR,       MVT::v64i8, Custom);
1897     setOperationAction(ISD::FSHL,      MVT::v32i16, Custom);
1898     setOperationAction(ISD::FSHR,      MVT::v32i16, Custom);
1899     setOperationAction(ISD::FSHL,      MVT::v16i32, Custom);
1900     setOperationAction(ISD::FSHR,      MVT::v16i32, Custom);
1901 
1902     if (Subtarget.hasDQI()) {
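           // AVX512DQ adds VPMULLQ and the 64-bit integer <-> FP conversion
           // instructions (VCVTQQ2PD and friends), which these entries rely on.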
1903       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1904                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1905                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1906         setOperationAction(Opc,           MVT::v8i64, Custom);
1907       setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1908     }
1909 
1910     if (Subtarget.hasCDI()) {
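           // AVX512CD supplies VPLZCNTD/VPLZCNTQ, so vector CTLZ is natively
           // supported for these types.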
1911       // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
1912       for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1913         setOperationAction(ISD::CTLZ,            VT, Legal);
1914       }
1915     } // Subtarget.hasCDI()
1916 
1917     if (Subtarget.hasVPOPCNTDQ()) {
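           // VPOPCNTDQ supplies VPOPCNTD/VPOPCNTQ for native dword/qword popcounts.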
1918       for (auto VT : { MVT::v16i32, MVT::v8i64 })
1919         setOperationAction(ISD::CTPOP, VT, Legal);
1920     }
1921 
1922     // Extract subvector is special because the value type
1923     // (result) is 256-bit but the source is 512-bit wide.
1924     // 128-bit was made Legal under AVX1.
1925     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1926                      MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1927       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1928 
1929     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1930                      MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1931       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1932       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1933       setOperationAction(ISD::SELECT,             VT, Custom);
1934       setOperationAction(ISD::VSELECT,            VT, Custom);
1935       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1936       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1937       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1938       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1939       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1940     }
1941     setF16Action(MVT::v32f16, Expand);
1942     setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1943     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1944     setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Custom);
1945     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Custom);
1946     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1947       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1948       setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1949     }
1950 
1951     for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1952       setOperationAction(ISD::MLOAD,               VT, Legal);
1953       setOperationAction(ISD::MSTORE,              VT, Legal);
1954       setOperationAction(ISD::MGATHER,             VT, Custom);
1955       setOperationAction(ISD::MSCATTER,            VT, Custom);
1956     }
1957     if (HasBWI) {
1958       for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1959         setOperationAction(ISD::MLOAD,        VT, Legal);
1960         setOperationAction(ISD::MSTORE,       VT, Legal);
1961       }
1962     } else {
1963       setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1964       setOperationAction(ISD::STORE, MVT::v64i8,  Custom);
1965     }
1966 
1967     if (Subtarget.hasVBMI2()) {
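           // VBMI2 adds the concatenate-and-shift instructions (VPSHLD/VPSHRD and
           // their variable forms), which back the funnel-shift lowering here.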
1968       for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1969         setOperationAction(ISD::FSHL, VT, Custom);
1970         setOperationAction(ISD::FSHR, VT, Custom);
1971       }
1972 
1973       setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1974       setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
1975     }
1976   }// useAVX512Regs
1977 
1978   if (!Subtarget.useSoftFloat() && Subtarget.hasVBMI2()) {
1979     for (auto VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v16i16, MVT::v8i32,
1980                     MVT::v4i64}) {
1981       setOperationAction(ISD::FSHL, VT, Custom);
1982       setOperationAction(ISD::FSHR, VT, Custom);
1983     }
1984   }
1985 
1986   // This block controls legalization for operations that don't have
1987   // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1988   // narrower widths.
1989   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1990     // These operations are handled on non-VLX by artificially widening in
1991     // isel patterns.
1992 
1993     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i32, Custom);
1994     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v4i32, Custom);
1995     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1996 
1997     if (Subtarget.hasDQI()) {
1998       // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1999       // v2f32 UINT_TO_FP is already custom under SSE2.
2000       assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
2001              isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
2002              "Unexpected operation action!");
2003       // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
2004       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
2005       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
2006       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
2007       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
2008     }
2009 
2010     for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
2011       setOperationAction(ISD::SMAX, VT, Legal);
2012       setOperationAction(ISD::UMAX, VT, Legal);
2013       setOperationAction(ISD::SMIN, VT, Legal);
2014       setOperationAction(ISD::UMIN, VT, Legal);
2015       setOperationAction(ISD::ABS,  VT, Legal);
2016     }
2017 
2018     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2019       setOperationAction(ISD::ROTL,     VT, Custom);
2020       setOperationAction(ISD::ROTR,     VT, Custom);
2021     }
2022 
2023     // Custom legalize 2x32 to get a little better code.
2024     setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
2025     setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
2026 
2027     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
2028                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
2029       setOperationAction(ISD::MSCATTER, VT, Custom);
2030 
2031     if (Subtarget.hasDQI()) {
2032       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
2033                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
2034                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
2035         setOperationAction(Opc, MVT::v2i64, Custom);
2036         setOperationAction(Opc, MVT::v4i64, Custom);
2037       }
2038       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
2039       setOperationAction(ISD::MUL, MVT::v4i64, Legal);
2040     }
2041 
2042     if (Subtarget.hasCDI()) {
2043       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2044         setOperationAction(ISD::CTLZ,            VT, Legal);
2045       }
2046     } // Subtarget.hasCDI()
2047 
2048     if (Subtarget.hasVPOPCNTDQ()) {
2049       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
2050         setOperationAction(ISD::CTPOP, VT, Legal);
2051     }
2052     setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
2053     setOperationAction(ISD::FABS, MVT::v32f16, Custom);
2054     setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
2055   }
2056 
2057   // This block controls legalization of v32i1/v64i1, which are available with
2058   // AVX512BW.
2059   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
2060     addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
2061     addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
2062 
2063     for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
2064       setOperationAction(ISD::VSELECT,            VT, Expand);
2065       setOperationAction(ISD::TRUNCATE,           VT, Custom);
2066       setOperationAction(ISD::SETCC,              VT, Custom);
2067       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2068       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
2069       setOperationAction(ISD::SELECT,             VT, Custom);
2070       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2071       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2072       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
2073       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Custom);
2074     }
2075 
2076     for (auto VT : { MVT::v16i1, MVT::v32i1 })
2077       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2078 
2079     // Extends from v32i1 masks to 256-bit vectors.
2080     setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
2081     setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
2082     setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
2083 
2084     for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2085       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
2086       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2087     }
2088 
2089     // These operations are handled on non-VLX by artificially widening in
2090     // isel patterns.
2091     // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2092 
2093     if (Subtarget.hasBITALG()) {
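           // AVX512BITALG supplies VPOPCNTB/VPOPCNTW for byte/word popcounts.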
2094       for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2095         setOperationAction(ISD::CTPOP, VT, Legal);
2096     }
2097   }
2098 
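       // This block controls legalization for AVX512-FP16, which provides native
       // f16 arithmetic (scalar, plus the vector widths gated on the checks below).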
2099   if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2100     auto setGroup = [&] (MVT VT) {
2101       setOperationAction(ISD::FADD,               VT, Legal);
2102       setOperationAction(ISD::STRICT_FADD,        VT, Legal);
2103       setOperationAction(ISD::FSUB,               VT, Legal);
2104       setOperationAction(ISD::STRICT_FSUB,        VT, Legal);
2105       setOperationAction(ISD::FMUL,               VT, Legal);
2106       setOperationAction(ISD::STRICT_FMUL,        VT, Legal);
2107       setOperationAction(ISD::FDIV,               VT, Legal);
2108       setOperationAction(ISD::STRICT_FDIV,        VT, Legal);
2109       setOperationAction(ISD::FSQRT,              VT, Legal);
2110       setOperationAction(ISD::STRICT_FSQRT,       VT, Legal);
2111 
2112       setOperationAction(ISD::FFLOOR,             VT, Legal);
2113       setOperationAction(ISD::STRICT_FFLOOR,      VT, Legal);
2114       setOperationAction(ISD::FCEIL,              VT, Legal);
2115       setOperationAction(ISD::STRICT_FCEIL,       VT, Legal);
2116       setOperationAction(ISD::FTRUNC,             VT, Legal);
2117       setOperationAction(ISD::STRICT_FTRUNC,      VT, Legal);
2118       setOperationAction(ISD::FRINT,              VT, Legal);
2119       setOperationAction(ISD::STRICT_FRINT,       VT, Legal);
2120       setOperationAction(ISD::FNEARBYINT,         VT, Legal);
2121       setOperationAction(ISD::STRICT_FNEARBYINT,  VT, Legal);
2122       setOperationAction(ISD::FROUNDEVEN,         VT, Legal);
2123       setOperationAction(ISD::STRICT_FROUNDEVEN,  VT, Legal);
2124 
2125       setOperationAction(ISD::FROUND,             VT, Custom);
2126 
2127       setOperationAction(ISD::LOAD,               VT, Legal);
2128       setOperationAction(ISD::STORE,              VT, Legal);
2129 
2130       setOperationAction(ISD::FMA,                VT, Legal);
2131       setOperationAction(ISD::STRICT_FMA,         VT, Legal);
2132       setOperationAction(ISD::VSELECT,            VT, Legal);
2133       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2134       setOperationAction(ISD::SELECT,             VT, Custom);
2135 
2136       setOperationAction(ISD::FNEG,               VT, Custom);
2137       setOperationAction(ISD::FABS,               VT, Custom);
2138       setOperationAction(ISD::FCOPYSIGN,          VT, Custom);
2139       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2140       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2141 
2142       setOperationAction(ISD::SETCC,              VT, Custom);
2143       setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
2144       setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
2145     };
2146 
2147     // AVX512_FP16 scalar operations
2148     setGroup(MVT::f16);
2149     setOperationAction(ISD::FREM,                 MVT::f16, Promote);
2150     setOperationAction(ISD::STRICT_FREM,          MVT::f16, Promote);
2151     setOperationAction(ISD::SELECT_CC,            MVT::f16, Expand);
2152     setOperationAction(ISD::BR_CC,                MVT::f16, Expand);
2153     setOperationAction(ISD::STRICT_FROUND,        MVT::f16, Promote);
2154     setOperationAction(ISD::FROUNDEVEN,           MVT::f16, Legal);
2155     setOperationAction(ISD::STRICT_FROUNDEVEN,    MVT::f16, Legal);
2156     setOperationAction(ISD::FP_ROUND,             MVT::f16, Custom);
2157     setOperationAction(ISD::STRICT_FP_ROUND,      MVT::f16, Custom);
2158     setOperationAction(ISD::FMAXIMUM,             MVT::f16, Custom);
2159     setOperationAction(ISD::FMINIMUM,             MVT::f16, Custom);
2160     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
2161     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
2162 
2163     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2164     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2165 
2166     if (Subtarget.useAVX512Regs()) {
2167       setGroup(MVT::v32f16);
2168       setOperationAction(ISD::SCALAR_TO_VECTOR,       MVT::v32f16, Custom);
2169       setOperationAction(ISD::SINT_TO_FP,             MVT::v32i16, Legal);
2170       setOperationAction(ISD::STRICT_SINT_TO_FP,      MVT::v32i16, Legal);
2171       setOperationAction(ISD::UINT_TO_FP,             MVT::v32i16, Legal);
2172       setOperationAction(ISD::STRICT_UINT_TO_FP,      MVT::v32i16, Legal);
2173       setOperationAction(ISD::FP_ROUND,               MVT::v16f16, Legal);
2174       setOperationAction(ISD::STRICT_FP_ROUND,        MVT::v16f16, Legal);
2175       setOperationAction(ISD::FP_EXTEND,              MVT::v16f32, Custom);
2176       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v16f32, Legal);
2177       setOperationAction(ISD::FP_EXTEND,              MVT::v8f64,  Custom);
2178       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v8f64,  Legal);
2179       setOperationAction(ISD::INSERT_VECTOR_ELT,      MVT::v32f16, Custom);
2180 
2181       setOperationAction(ISD::FP_TO_SINT,             MVT::v32i16, Custom);
2182       setOperationAction(ISD::STRICT_FP_TO_SINT,      MVT::v32i16, Custom);
2183       setOperationAction(ISD::FP_TO_UINT,             MVT::v32i16, Custom);
2184       setOperationAction(ISD::STRICT_FP_TO_UINT,      MVT::v32i16, Custom);
2185       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i8,  MVT::v32i16);
2186       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2187                                  MVT::v32i16);
2188       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i8,  MVT::v32i16);
2189       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2190                                  MVT::v32i16);
2191       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i1,  MVT::v32i16);
2192       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2193                                  MVT::v32i16);
2194       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i1,  MVT::v32i16);
2195       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2196                                  MVT::v32i16);
2197 
2198       setOperationAction(ISD::EXTRACT_SUBVECTOR,      MVT::v16f16, Legal);
2199       setOperationAction(ISD::INSERT_SUBVECTOR,       MVT::v32f16, Legal);
2200       setOperationAction(ISD::CONCAT_VECTORS,         MVT::v32f16, Custom);
2201 
2202       setLoadExtAction(ISD::EXTLOAD, MVT::v8f64,  MVT::v8f16,  Legal);
2203       setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2204     }
2205 
2206     if (Subtarget.hasVLX()) {
2207       setGroup(MVT::v8f16);
2208       setGroup(MVT::v16f16);
2209 
2210       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8f16,  Legal);
2211       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16f16, Custom);
2212       setOperationAction(ISD::SINT_TO_FP,         MVT::v16i16, Legal);
2213       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v16i16, Legal);
2214       setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16,  Legal);
2215       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i16,  Legal);
2216       setOperationAction(ISD::UINT_TO_FP,         MVT::v16i16, Legal);
2217       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v16i16, Legal);
2218       setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16,  Legal);
2219       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v8i16,  Legal);
2220 
2221       setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
2222       setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
2223       setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
2224       setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
2225       setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Legal);
2226       setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f16, Legal);
2227       setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Custom);
2228       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v8f32, Legal);
2229       setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
2230       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
2231 
2232       // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2233       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v8f16,  Custom);
2234       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v16f16, Custom);
2235 
2236       setOperationAction(ISD::EXTRACT_SUBVECTOR,    MVT::v8f16, Legal);
2237       setOperationAction(ISD::INSERT_SUBVECTOR,     MVT::v16f16, Legal);
2238       setOperationAction(ISD::CONCAT_VECTORS,       MVT::v16f16, Custom);
2239 
2240       setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2241       setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2242       setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2243       setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2244 
2245       // Need to custom widen these to prevent scalarization.
2246       setOperationAction(ISD::LOAD,  MVT::v4f16, Custom);
2247       setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2248     }
2249   }
2250 
2251   if (!Subtarget.useSoftFloat() &&
2252       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2253     addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
2254                                                         : &X86::VR128RegClass);
2255     addRegisterClass(MVT::v16bf16, Subtarget.hasAVX512() ? &X86::VR256XRegClass
2256                                                          : &X86::VR256RegClass);
2257     // We set the type action of bf16 to TypeSoftPromoteHalf, but we don't
2258     // provide a way to soft-promote BUILD_VECTOR and INSERT_VECTOR_ELT, so set
2259     // the operation action to Custom and do the customization later.
2260     setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2261     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::bf16, Custom);
2262     for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2263       setF16Action(VT, Expand);
2264       setOperationAction(ISD::FADD, VT, Expand);
2265       setOperationAction(ISD::FSUB, VT, Expand);
2266       setOperationAction(ISD::FMUL, VT, Expand);
2267       setOperationAction(ISD::FDIV, VT, Expand);
2268       setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2269       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2270       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
2271       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2272     }
2273     setOperationAction(ISD::FP_ROUND, MVT::v8bf16, Custom);
2274     addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2275   }
2276 
2277   if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2278     addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2279     setF16Action(MVT::v32bf16, Expand);
2280     setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2281     setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2282     setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2283     setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2284     setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2285     setOperationAction(ISD::FP_ROUND, MVT::v16bf16, Custom);
2286     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
2287     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32bf16, Legal);
2288     setOperationAction(ISD::CONCAT_VECTORS, MVT::v32bf16, Custom);
2289   }
2290 
2291   if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2292     setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
2293     setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2294     setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2295     setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
2296     setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2297 
2298     setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
2299     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2300     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2301     setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
2302     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2303 
2304     if (Subtarget.hasBWI()) {
2305       setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
2306       setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
2307     }
2308 
2309     if (Subtarget.hasFP16()) {
2310       // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2311       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f16, Custom);
2312       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2313       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f16, Custom);
2314       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2315       setOperationAction(ISD::FP_TO_SINT,        MVT::v4f16, Custom);
2316       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2317       setOperationAction(ISD::FP_TO_UINT,        MVT::v4f16, Custom);
2318       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2319       // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2320       setOperationAction(ISD::SINT_TO_FP,        MVT::v2f16, Custom);
2321       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2322       setOperationAction(ISD::UINT_TO_FP,        MVT::v2f16, Custom);
2323       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2324       setOperationAction(ISD::SINT_TO_FP,        MVT::v4f16, Custom);
2325       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2326       setOperationAction(ISD::UINT_TO_FP,        MVT::v4f16, Custom);
2327       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2328       // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2329       setOperationAction(ISD::FP_ROUND,          MVT::v2f16, Custom);
2330       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v2f16, Custom);
2331       setOperationAction(ISD::FP_ROUND,          MVT::v4f16, Custom);
2332       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v4f16, Custom);
2333       // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2334       setOperationAction(ISD::FP_EXTEND,         MVT::v2f16, Custom);
2335       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v2f16, Custom);
2336       setOperationAction(ISD::FP_EXTEND,         MVT::v4f16, Custom);
2337       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v4f16, Custom);
2338     }
2339   }
2340 
2341   if (!Subtarget.useSoftFloat() && Subtarget.hasAMXTILE()) {
2342     addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2343   }
2344 
2345   // We want to custom lower some of our intrinsics.
2346   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2347   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2348   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2349   if (!Subtarget.is64Bit()) {
2350     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2351   }
2352 
2353   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2354   // handle type legalization for these operations here.
2355   //
2356   // FIXME: We really should do custom legalization for addition and
2357   // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
2358   // than generic legalization for 64-bit multiplication-with-overflow, though.
2359   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2360     if (VT == MVT::i64 && !Subtarget.is64Bit())
2361       continue;
2362     // Add/Sub/Mul with overflow operations are custom lowered.
2363     setOperationAction(ISD::SADDO, VT, Custom);
2364     setOperationAction(ISD::UADDO, VT, Custom);
2365     setOperationAction(ISD::SSUBO, VT, Custom);
2366     setOperationAction(ISD::USUBO, VT, Custom);
2367     setOperationAction(ISD::SMULO, VT, Custom);
2368     setOperationAction(ISD::UMULO, VT, Custom);
2369 
2370     // Support carry in as value rather than glue.
2371     setOperationAction(ISD::UADDO_CARRY, VT, Custom);
2372     setOperationAction(ISD::USUBO_CARRY, VT, Custom);
2373     setOperationAction(ISD::SETCCCARRY, VT, Custom);
2374     setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2375     setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2376   }
2377 
2378   if (!Subtarget.is64Bit()) {
2379     // These libcalls are not available in 32-bit.
2380     setLibcallName(RTLIB::SHL_I128, nullptr);
2381     setLibcallName(RTLIB::SRL_I128, nullptr);
2382     setLibcallName(RTLIB::SRA_I128, nullptr);
2383     setLibcallName(RTLIB::MUL_I128, nullptr);
2384     // The MULO libcall is not part of libgcc, only compiler-rt.
2385     setLibcallName(RTLIB::MULO_I64, nullptr);
2386   }
2387   // The MULO libcall is not part of libgcc, only compiler-rt.
2388   setLibcallName(RTLIB::MULO_I128, nullptr);
2389 
2390   // Combine sin / cos into _sincos_stret if it is available.
2391   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2392       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2393     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2394     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2395   }
2396 
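       // A sketch of the motivation (not spelled out in the original code): the
       // Win64 ABI passes and returns i128 values indirectly, so the default
       // libcall expansion for these 128-bit operations can't be used as-is.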
2397   if (Subtarget.isTargetWin64()) {
2398     setOperationAction(ISD::SDIV, MVT::i128, Custom);
2399     setOperationAction(ISD::UDIV, MVT::i128, Custom);
2400     setOperationAction(ISD::SREM, MVT::i128, Custom);
2401     setOperationAction(ISD::UREM, MVT::i128, Custom);
2402     setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2403     setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2404     setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2405     setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2406     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2407     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2408     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2409     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2410   }
2411 
2412   // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2413   // is. We should promote the value to 64 bits to solve this.
2414   // This is what the CRT headers do - `fmodf` is an inline header
2415   // function casting to f64 and calling `fmod`.
2416   if (Subtarget.is32Bit() &&
2417       (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2418     for (ISD::NodeType Op :
2419          {ISD::FCEIL,  ISD::STRICT_FCEIL,
2420           ISD::FCOS,   ISD::STRICT_FCOS,
2421           ISD::FEXP,   ISD::STRICT_FEXP,
2422           ISD::FFLOOR, ISD::STRICT_FFLOOR,
2423           ISD::FREM,   ISD::STRICT_FREM,
2424           ISD::FLOG,   ISD::STRICT_FLOG,
2425           ISD::FLOG10, ISD::STRICT_FLOG10,
2426           ISD::FPOW,   ISD::STRICT_FPOW,
2427           ISD::FSIN,   ISD::STRICT_FSIN})
2428       if (isOperationExpand(Op, MVT::f32))
2429         setOperationAction(Op, MVT::f32, Promote);
2430 
2431   // We have target-specific dag combine patterns for the following nodes:
2432   setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2433                        ISD::SCALAR_TO_VECTOR,
2434                        ISD::INSERT_VECTOR_ELT,
2435                        ISD::EXTRACT_VECTOR_ELT,
2436                        ISD::CONCAT_VECTORS,
2437                        ISD::INSERT_SUBVECTOR,
2438                        ISD::EXTRACT_SUBVECTOR,
2439                        ISD::BITCAST,
2440                        ISD::VSELECT,
2441                        ISD::SELECT,
2442                        ISD::SHL,
2443                        ISD::SRA,
2444                        ISD::SRL,
2445                        ISD::OR,
2446                        ISD::AND,
2447                        ISD::ADD,
2448                        ISD::FADD,
2449                        ISD::FSUB,
2450                        ISD::FNEG,
2451                        ISD::FMA,
2452                        ISD::STRICT_FMA,
2453                        ISD::FMINNUM,
2454                        ISD::FMAXNUM,
2455                        ISD::SUB,
2456                        ISD::LOAD,
2457                        ISD::MLOAD,
2458                        ISD::STORE,
2459                        ISD::MSTORE,
2460                        ISD::TRUNCATE,
2461                        ISD::ZERO_EXTEND,
2462                        ISD::ANY_EXTEND,
2463                        ISD::SIGN_EXTEND,
2464                        ISD::SIGN_EXTEND_INREG,
2465                        ISD::ANY_EXTEND_VECTOR_INREG,
2466                        ISD::SIGN_EXTEND_VECTOR_INREG,
2467                        ISD::ZERO_EXTEND_VECTOR_INREG,
2468                        ISD::SINT_TO_FP,
2469                        ISD::UINT_TO_FP,
2470                        ISD::STRICT_SINT_TO_FP,
2471                        ISD::STRICT_UINT_TO_FP,
2472                        ISD::SETCC,
2473                        ISD::MUL,
2474                        ISD::XOR,
2475                        ISD::MSCATTER,
2476                        ISD::MGATHER,
2477                        ISD::FP16_TO_FP,
2478                        ISD::FP_EXTEND,
2479                        ISD::STRICT_FP_EXTEND,
2480                        ISD::FP_ROUND,
2481                        ISD::STRICT_FP_ROUND});
2482 
2483   computeRegisterProperties(Subtarget.getRegisterInfo());
2484 
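       // These thresholds bound how many inline stores a small fixed-size
       // @llvm.memset/@llvm.memcpy/@llvm.memmove may be expanded into (vector
       // stores when SSE/AVX is available); larger operations generally fall back
       // to the library call.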
2485   MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2486   MaxStoresPerMemsetOptSize = 8;
2487   MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2488   MaxStoresPerMemcpyOptSize = 4;
2489   MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2490   MaxStoresPerMemmoveOptSize = 4;
2491 
2492   // TODO: These control memcmp expansion in CGP and could be raised higher, but
2493 // that needs to be benchmarked and balanced with the potential use of vector
2494   // load/store types (PR33329, PR33914).
2495   MaxLoadsPerMemcmp = 2;
2496   MaxLoadsPerMemcmpOptSize = 2;
2497 
2498   // Default loop alignment, which can be overridden by -align-loops.
2499   setPrefLoopAlignment(Align(16));
2500 
2501   // An out-of-order CPU can speculatively execute past a predictable branch,
2502   // but a conditional move could be stalled by an expensive earlier operation.
2503   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2504   EnableExtLdPromotion = true;
2505   setPrefFunctionAlignment(Align(16));
2506 
2507   verifyIntrinsicTables();
2508 
2509   // Default to having -disable-strictnode-mutation on
2510   IsStrictFPEnabled = true;
2511 }
2512 
2513 // This has so far only been implemented for 64-bit MachO.
2514 bool X86TargetLowering::useLoadStackGuardNode() const {
2515   return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2516 }
2517 
2518 bool X86TargetLowering::useStackGuardXorFP() const {
2519   // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2520   return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2521 }
2522 
2523 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2524                                                const SDLoc &DL) const {
2525   EVT PtrTy = getPointerTy(DAG.getDataLayout());
2526   unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2527   MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2528   return SDValue(Node, 0);
2529 }
2530 
2531 TargetLoweringBase::LegalizeTypeAction
2532 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
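       // Without AVX512BW the 32- and 64-bit mask types have no native k-register
       // support (KMOVD/KMOVQ and the wider mask operations require BW), so they
       // are split instead of widened.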
2533   if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2534       !Subtarget.hasBWI())
2535     return TypeSplitVector;
2536 
2537   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2538       !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2539     return TypeSplitVector;
2540 
2541   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2542       VT.getVectorElementType() != MVT::i1)
2543     return TypeWidenVector;
2544 
2545   return TargetLoweringBase::getPreferredVectorAction(VT);
2546 }
2547 
2548 FastISel *
2549 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2550                                   const TargetLibraryInfo *libInfo) const {
2551   return X86::createFastISel(funcInfo, libInfo);
2552 }
2553 
2554 //===----------------------------------------------------------------------===//
2555 //                           Other Lowering Hooks
2556 //===----------------------------------------------------------------------===//
2557 
2558 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
2559                       bool AssumeSingleUse) {
2560   if (!AssumeSingleUse && !Op.hasOneUse())
2561     return false;
2562   if (!ISD::isNormalLoad(Op.getNode()))
2563     return false;
2564 
2565   // If this is an unaligned vector, make sure the target supports folding it.
2566   auto *Ld = cast<LoadSDNode>(Op.getNode());
2567   if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
2568       Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
2569     return false;
2570 
2571   // TODO: If this is a non-temporal load and the target has an instruction
2572   //       for it, it should not be folded. See "useNonTemporalLoad()".
2573 
2574   return true;
2575 }
2576 
2577 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
2578                                           const X86Subtarget &Subtarget,
2579                                           bool AssumeSingleUse) {
2580   assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
2581   if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
2582     return false;
2583 
2584   // We cannot replace a wide volatile load with a broadcast-from-memory,
2585   // because that would narrow the load, which isn't legal for volatiles.
2586   auto *Ld = cast<LoadSDNode>(Op.getNode());
2587   return !Ld->isVolatile() ||
2588          Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
2589 }
2590 
2591 bool X86::mayFoldIntoStore(SDValue Op) {
2592   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
2593 }
2594 
2595 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
2596   if (Op.hasOneUse()) {
2597     unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
2598     return (ISD::ZERO_EXTEND == Opcode);
2599   }
2600   return false;
2601 }
2602 
2603 static bool isTargetShuffle(unsigned Opcode) {
2604   switch(Opcode) {
2605   default: return false;
2606   case X86ISD::BLENDI:
2607   case X86ISD::PSHUFB:
2608   case X86ISD::PSHUFD:
2609   case X86ISD::PSHUFHW:
2610   case X86ISD::PSHUFLW:
2611   case X86ISD::SHUFP:
2612   case X86ISD::INSERTPS:
2613   case X86ISD::EXTRQI:
2614   case X86ISD::INSERTQI:
2615   case X86ISD::VALIGN:
2616   case X86ISD::PALIGNR:
2617   case X86ISD::VSHLDQ:
2618   case X86ISD::VSRLDQ:
2619   case X86ISD::MOVLHPS:
2620   case X86ISD::MOVHLPS:
2621   case X86ISD::MOVSHDUP:
2622   case X86ISD::MOVSLDUP:
2623   case X86ISD::MOVDDUP:
2624   case X86ISD::MOVSS:
2625   case X86ISD::MOVSD:
2626   case X86ISD::MOVSH:
2627   case X86ISD::UNPCKL:
2628   case X86ISD::UNPCKH:
2629   case X86ISD::VBROADCAST:
2630   case X86ISD::VPERMILPI:
2631   case X86ISD::VPERMILPV:
2632   case X86ISD::VPERM2X128:
2633   case X86ISD::SHUF128:
2634   case X86ISD::VPERMIL2:
2635   case X86ISD::VPERMI:
2636   case X86ISD::VPPERM:
2637   case X86ISD::VPERMV:
2638   case X86ISD::VPERMV3:
2639   case X86ISD::VZEXT_MOVL:
2640     return true;
2641   }
2642 }
2643 
2644 static bool isTargetShuffleVariableMask(unsigned Opcode) {
2645   switch (Opcode) {
2646   default: return false;
2647   // Target Shuffles.
2648   case X86ISD::PSHUFB:
2649   case X86ISD::VPERMILPV:
2650   case X86ISD::VPERMIL2:
2651   case X86ISD::VPPERM:
2652   case X86ISD::VPERMV:
2653   case X86ISD::VPERMV3:
2654     return true;
2655   // 'Faux' Target Shuffles.
2656   case ISD::OR:
2657   case ISD::AND:
2658   case X86ISD::ANDNP:
2659     return true;
2660   }
2661 }
2662 
2663 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
2664   MachineFunction &MF = DAG.getMachineFunction();
2665   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2666   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2667   int ReturnAddrIndex = FuncInfo->getRAIndex();
2668 
2669   if (ReturnAddrIndex == 0) {
2670     // Set up a frame object for the return address.
2671     unsigned SlotSize = RegInfo->getSlotSize();
2672     ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
2673                                                           -(int64_t)SlotSize,
2674                                                           false);
2675     FuncInfo->setRAIndex(ReturnAddrIndex);
2676   }
2677 
2678   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
2679 }
2680 
2681 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model CM,
2682                                        bool HasSymbolicDisplacement) {
2683   // Offset should fit into 32 bit immediate field.
2684   if (!isInt<32>(Offset))
2685     return false;
2686 
2687   // If we don't have a symbolic displacement - we don't have any extra
2688   // restrictions.
2689   if (!HasSymbolicDisplacement)
2690     return true;
2691 
2692   // We can fold large offsets in the large code model because we always use
2693   // 64-bit offsets.
2694   if (CM == CodeModel::Large)
2695     return true;
2696 
2697   // For the kernel code model we know that all objects reside in the negative
2698   // half of the 32-bit address space. We must not accept negative offsets,
2699   // since they may fall outside that range, but we can accept pretty large
2700   // positive ones.
2700   if (CM == CodeModel::Kernel)
2701     return Offset >= 0;
2702 
2703   // For the other non-large code models we assume that the last small object
2704   // ends at least 16MB before the end of the 31-bit (2GB) boundary. We can
2705   // also accept pretty large negative constants, knowing that all objects are
2706   // in the positive half of the address space.
2707   return Offset < 16 * 1024 * 1024;
2708 }
2709 
2710 /// Return true if the condition is a signed comparison operation.
2711 static bool isX86CCSigned(unsigned X86CC) {
2712   switch (X86CC) {
2713   default:
2714     llvm_unreachable("Invalid integer condition!");
2715   case X86::COND_E:
2716   case X86::COND_NE:
2717   case X86::COND_B:
2718   case X86::COND_A:
2719   case X86::COND_BE:
2720   case X86::COND_AE:
2721     return false;
2722   case X86::COND_G:
2723   case X86::COND_GE:
2724   case X86::COND_L:
2725   case X86::COND_LE:
2726     return true;
2727   }
2728 }
2729 
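     // Map an integer ISD::CondCode onto the corresponding X86 condition code.
     // Unsigned comparisons use the carry-flag conditions (B/A/BE/AE); signed
     // comparisons use the sign/overflow conditions (L/G/LE/GE).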
2730 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
2731   switch (SetCCOpcode) {
2732   default: llvm_unreachable("Invalid integer condition!");
2733   case ISD::SETEQ:  return X86::COND_E;
2734   case ISD::SETGT:  return X86::COND_G;
2735   case ISD::SETGE:  return X86::COND_GE;
2736   case ISD::SETLT:  return X86::COND_L;
2737   case ISD::SETLE:  return X86::COND_LE;
2738   case ISD::SETNE:  return X86::COND_NE;
2739   case ISD::SETULT: return X86::COND_B;
2740   case ISD::SETUGT: return X86::COND_A;
2741   case ISD::SETULE: return X86::COND_BE;
2742   case ISD::SETUGE: return X86::COND_AE;
2743   }
2744 }
2745 
2746 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
2747 /// condition code, returning the condition code and the LHS/RHS of the
2748 /// comparison to make.
2749 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
2750                                     bool isFP, SDValue &LHS, SDValue &RHS,
2751                                     SelectionDAG &DAG) {
2752   if (!isFP) {
2753     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2754       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
2755         // X > -1   -> X == 0, jump !sign.
2756         RHS = DAG.getConstant(0, DL, RHS.getValueType());
2757         return X86::COND_NS;
2758       }
2759       if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
2760         // X < 0   -> X == 0, jump on sign.
2761         return X86::COND_S;
2762       }
2763       if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
2764         // X >= 0   -> X == 0, jump on !sign.
2765         return X86::COND_NS;
2766       }
2767       if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
2768         // X < 1   -> X <= 0
2769         RHS = DAG.getConstant(0, DL, RHS.getValueType());
2770         return X86::COND_LE;
2771       }
2772     }
2773 
2774     return TranslateIntegerX86CC(SetCCOpcode);
2775   }
2776 
2777   // First determine if it is required or is profitable to flip the operands.
2778 
2779   // If LHS is a foldable load, but RHS is not, flip the condition.
2780   if (ISD::isNON_EXTLoad(LHS.getNode()) &&
2781       !ISD::isNON_EXTLoad(RHS.getNode())) {
2782     SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
2783     std::swap(LHS, RHS);
2784   }
2785 
2786   switch (SetCCOpcode) {
2787   default: break;
2788   case ISD::SETOLT:
2789   case ISD::SETOLE:
2790   case ISD::SETUGT:
2791   case ISD::SETUGE:
2792     std::swap(LHS, RHS);
2793     break;
2794   }
2795 
2796   // On a floating point condition, the flags are set as follows:
2797   // ZF  PF  CF   op
2798   //  0 | 0 | 0 | X > Y
2799   //  0 | 0 | 1 | X < Y
2800   //  1 | 0 | 0 | X == Y
2801   //  1 | 1 | 1 | unordered
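       // For example, SETOGT maps to COND_A (CF == 0 and ZF == 0); because an
       // unordered compare sets ZF = PF = CF = 1, COND_A is also false on
       // unordered inputs, exactly as an ordered 'greater than' requires.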
2802   switch (SetCCOpcode) {
2803   default: llvm_unreachable("Condcode should be pre-legalized away");
2804   case ISD::SETUEQ:
2805   case ISD::SETEQ:   return X86::COND_E;
2806   case ISD::SETOLT:              // flipped
2807   case ISD::SETOGT:
2808   case ISD::SETGT:   return X86::COND_A;
2809   case ISD::SETOLE:              // flipped
2810   case ISD::SETOGE:
2811   case ISD::SETGE:   return X86::COND_AE;
2812   case ISD::SETUGT:              // flipped
2813   case ISD::SETULT:
2814   case ISD::SETLT:   return X86::COND_B;
2815   case ISD::SETUGE:              // flipped
2816   case ISD::SETULE:
2817   case ISD::SETLE:   return X86::COND_BE;
2818   case ISD::SETONE:
2819   case ISD::SETNE:   return X86::COND_NE;
2820   case ISD::SETUO:   return X86::COND_P;
2821   case ISD::SETO:    return X86::COND_NP;
2822   case ISD::SETOEQ:
2823   case ISD::SETUNE:  return X86::COND_INVALID;
2824   }
2825 }
2826 
2827 /// Is there a floating point cmov for the specific X86 condition code?
2828 /// The current x86 ISA includes the following FP cmov instructions:
2829 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
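     /// These are the conditions testable from the ZF/PF/CF flags produced by
     /// FUCOMI/FCOMI; there are no signed-flavored (L/G/LE/GE) FP cmov forms.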
2830 static bool hasFPCMov(unsigned X86CC) {
2831   switch (X86CC) {
2832   default:
2833     return false;
2834   case X86::COND_B:
2835   case X86::COND_BE:
2836   case X86::COND_E:
2837   case X86::COND_P:
2838   case X86::COND_A:
2839   case X86::COND_AE:
2840   case X86::COND_NE:
2841   case X86::COND_NP:
2842     return true;
2843   }
2844 }
2845 
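     // VPTERNLOG is the AVX-512 three-operand ternary bitwise-logic instruction.
     // The 512-bit form only needs AVX512F; the 128/256-bit forms require VLX,
     // otherwise the operation must be widened to 512 bits (canExtendTo512DQ).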
2846 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
2847   return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
2848          VT.is512BitVector();
2849 }
2850 
2851 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
2852                                            const CallInst &I,
2853                                            MachineFunction &MF,
2854                                            unsigned Intrinsic) const {
2855   Info.flags = MachineMemOperand::MONone;
2856   Info.offset = 0;
2857 
2858   const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
2859   if (!IntrData) {
2860     switch (Intrinsic) {
2861     case Intrinsic::x86_aesenc128kl:
2862     case Intrinsic::x86_aesdec128kl:
2863       Info.opc = ISD::INTRINSIC_W_CHAIN;
2864       Info.ptrVal = I.getArgOperand(1);
2865       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2866       Info.align = Align(1);
2867       Info.flags |= MachineMemOperand::MOLoad;
2868       return true;
2869     case Intrinsic::x86_aesenc256kl:
2870     case Intrinsic::x86_aesdec256kl:
2871       Info.opc = ISD::INTRINSIC_W_CHAIN;
2872       Info.ptrVal = I.getArgOperand(1);
2873       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2874       Info.align = Align(1);
2875       Info.flags |= MachineMemOperand::MOLoad;
2876       return true;
2877     case Intrinsic::x86_aesencwide128kl:
2878     case Intrinsic::x86_aesdecwide128kl:
2879       Info.opc = ISD::INTRINSIC_W_CHAIN;
2880       Info.ptrVal = I.getArgOperand(0);
2881       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2882       Info.align = Align(1);
2883       Info.flags |= MachineMemOperand::MOLoad;
2884       return true;
2885     case Intrinsic::x86_aesencwide256kl:
2886     case Intrinsic::x86_aesdecwide256kl:
2887       Info.opc = ISD::INTRINSIC_W_CHAIN;
2888       Info.ptrVal = I.getArgOperand(0);
2889       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2890       Info.align = Align(1);
2891       Info.flags |= MachineMemOperand::MOLoad;
2892       return true;
2893     case Intrinsic::x86_cmpccxadd32:
2894     case Intrinsic::x86_cmpccxadd64:
2895     case Intrinsic::x86_atomic_bts:
2896     case Intrinsic::x86_atomic_btc:
2897     case Intrinsic::x86_atomic_btr: {
2898       Info.opc = ISD::INTRINSIC_W_CHAIN;
2899       Info.ptrVal = I.getArgOperand(0);
2900       unsigned Size = I.getType()->getScalarSizeInBits();
2901       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2902       Info.align = Align(Size);
2903       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2904                     MachineMemOperand::MOVolatile;
2905       return true;
2906     }
2907     case Intrinsic::x86_atomic_bts_rm:
2908     case Intrinsic::x86_atomic_btc_rm:
2909     case Intrinsic::x86_atomic_btr_rm: {
2910       Info.opc = ISD::INTRINSIC_W_CHAIN;
2911       Info.ptrVal = I.getArgOperand(0);
2912       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2913       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2914       Info.align = Align(Size);
2915       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2916                     MachineMemOperand::MOVolatile;
2917       return true;
2918     }
2919     case Intrinsic::x86_aadd32:
2920     case Intrinsic::x86_aadd64:
2921     case Intrinsic::x86_aand32:
2922     case Intrinsic::x86_aand64:
2923     case Intrinsic::x86_aor32:
2924     case Intrinsic::x86_aor64:
2925     case Intrinsic::x86_axor32:
2926     case Intrinsic::x86_axor64:
2927     case Intrinsic::x86_atomic_add_cc:
2928     case Intrinsic::x86_atomic_sub_cc:
2929     case Intrinsic::x86_atomic_or_cc:
2930     case Intrinsic::x86_atomic_and_cc:
2931     case Intrinsic::x86_atomic_xor_cc: {
2932       Info.opc = ISD::INTRINSIC_W_CHAIN;
2933       Info.ptrVal = I.getArgOperand(0);
2934       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2935       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2936       Info.align = Align(Size);
2937       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2938                     MachineMemOperand::MOVolatile;
2939       return true;
2940     }
2941     }
2942     return false;
2943   }
2944 
2945   switch (IntrData->Type) {
2946   case TRUNCATE_TO_MEM_VI8:
2947   case TRUNCATE_TO_MEM_VI16:
2948   case TRUNCATE_TO_MEM_VI32: {
2949     Info.opc = ISD::INTRINSIC_VOID;
2950     Info.ptrVal = I.getArgOperand(0);
2951     MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
2952     MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
2953     if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
2954       ScalarVT = MVT::i8;
2955     else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
2956       ScalarVT = MVT::i16;
2957     else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
2958       ScalarVT = MVT::i32;
2959 
2960     Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
2961     Info.align = Align(1);
2962     Info.flags |= MachineMemOperand::MOStore;
2963     break;
2964   }
2965   case GATHER:
2966   case GATHER_AVX2: {
2967     Info.opc = ISD::INTRINSIC_W_CHAIN;
2968     Info.ptrVal = nullptr;
2969     MVT DataVT = MVT::getVT(I.getType());
2970     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2971     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2972                                 IndexVT.getVectorNumElements());
2973     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2974     Info.align = Align(1);
2975     Info.flags |= MachineMemOperand::MOLoad;
2976     break;
2977   }
2978   case SCATTER: {
2979     Info.opc = ISD::INTRINSIC_VOID;
2980     Info.ptrVal = nullptr;
2981     MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
2982     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2983     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2984                                 IndexVT.getVectorNumElements());
2985     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2986     Info.align = Align(1);
2987     Info.flags |= MachineMemOperand::MOStore;
2988     break;
2989   }
2990   default:
2991     return false;
2992   }
2993 
2994   return true;
2995 }
2996 
2997 /// Returns true if the target can instruction select the
2998 /// specified FP immediate natively. If false, the legalizer will
2999 /// materialize the FP immediate as a load from a constant pool.
3000 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3001                                      bool ForCodeSize) const {
3002   for (const APFloat &FPImm : LegalFPImmediates)
3003     if (Imm.bitwiseIsEqual(FPImm))
3004       return true;
3005   return false;
3006 }
3007 
3008 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3009                                               ISD::LoadExtType ExtTy,
3010                                               EVT NewVT) const {
3011   assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
3012 
3013   // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3014   // relocation target a movq or addq instruction: don't let the load shrink.
3015   SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3016   if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3017     if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3018       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3019 
3020   // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
3021   // those uses are extracted directly into a store, then the extract + store
3022   // can be store-folded. Therefore, it's probably not worth splitting the load.
3023   EVT VT = Load->getValueType(0);
3024   if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
3025     for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
3026       // Skip uses of the chain value. Result 0 of the node is the load value.
3027       if (UI.getUse().getResNo() != 0)
3028         continue;
3029 
3030       // If this use is not an extract + store, it's probably worth splitting.
3031       if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
3032           UI->use_begin()->getOpcode() != ISD::STORE)
3033         return true;
3034     }
3035     // All non-chain uses are extract + store.
3036     return false;
3037   }
3038 
3039   return true;
3040 }
3041 
3042 /// Returns true if it is beneficial to convert a load of a constant
3043 /// to just the constant itself.
3044 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3045                                                           Type *Ty) const {
3046   assert(Ty->isIntegerTy());
3047 
3048   unsigned BitSize = Ty->getPrimitiveSizeInBits();
3049   if (BitSize == 0 || BitSize > 64)
3050     return false;
3051   return true;
3052 }
3053 
3054 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
3055   // If we are using XMM registers in the ABI and the condition of the select is
3056   // a floating-point compare and we have blendv or conditional move, then it is
3057   // cheaper to select instead of doing a cross-register move and creating a
3058   // load that depends on the compare result.
3059   bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
3060   return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
3061 }
3062 
3063 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
3064   // TODO: It might be a win to ease or lift this restriction, but the generic
3065   // folds in DAGCombiner conflict with vector folds for an AVX512 target.
3066   if (VT.isVector() && Subtarget.hasAVX512())
3067     return false;
3068 
3069   return true;
3070 }
3071 
3072 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
3073                                                SDValue C) const {
3074   // TODO: We handle scalars using custom code, but generic combining could make
3075   // that unnecessary.
3076   APInt MulC;
3077   if (!ISD::isConstantSplatVector(C.getNode(), MulC))
3078     return false;
3079 
3080   // Find the type this will be legalized to. Otherwise we might prematurely
3081   // convert this to shl+add/sub and then still have to type legalize those ops.
3082   // Another choice would be to defer the decision for illegal types until
3083   // after type legalization. But constant splat vectors of i64 can't make it
3084   // through type legalization on 32-bit targets so we would need to special
3085   // case vXi64.
3086   while (getTypeAction(Context, VT) != TypeLegal)
3087     VT = getTypeToTransformTo(Context, VT);
3088 
3089   // If vector multiply is legal, assume that's faster than shl + add/sub.
3090   // Multiply is a complex op with higher latency and lower throughput in
3091   // most implementations; however, sub-vXi32 vector multiplies are always
3092   // fast, vXi32 is fine as long as PMULLD isn't slow on this target, and
3093   // anything larger (vXi64) is always going to be slow.
3094   unsigned EltSizeInBits = VT.getScalarSizeInBits();
3095   if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
3096       (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
3097     return false;
3098 
3099   // shl+add, shl+sub, shl+add+neg
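       // e.g. MulC ==  9: x*9  == (x << 3) + x       (shl+add)
       //      MulC ==  7: x*7  == (x << 3) - x       (shl+sub)
       //      MulC == -7: x*-7 == x - (x << 3)       (shl+sub)
       //      MulC == -9: x*-9 == -((x << 3) + x)    (shl+add+neg)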
3100   return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
3101          (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
3102 }
3103 
3104 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3105                                                 unsigned Index) const {
3106   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3107     return false;
3108 
3109   // Mask vectors support all subregister combinations and operations that
3110   // extract half of a vector.
3111   if (ResVT.getVectorElementType() == MVT::i1)
3112     return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
3113                           (Index == ResVT.getVectorNumElements()));
3114 
3115   return (Index % ResVT.getVectorNumElements()) == 0;
3116 }
3117 
3118 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
3119   unsigned Opc = VecOp.getOpcode();
3120 
3121   // Assume target opcodes can't be scalarized.
3122   // TODO - do we have any exceptions?
3123   if (Opc >= ISD::BUILTIN_OP_END)
3124     return false;
3125 
3126   // If the vector op is not supported, try to convert to scalar.
3127   EVT VecVT = VecOp.getValueType();
3128   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
3129     return true;
3130 
3131   // If the vector op is supported, but the scalar op is not, the transform may
3132   // not be worthwhile.
3133   EVT ScalarVT = VecVT.getScalarType();
3134   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
3135 }
3136 
3137 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
3138                                              bool) const {
3139   // TODO: Allow vectors?
3140   if (VT.isVector())
3141     return false;
3142   return VT.isSimple() || !isOperationExpand(Opcode, VT);
3143 }
3144 
3145 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3146   // Speculate cttz only if we can directly use TZCNT or can promote to i32.
3147   return Subtarget.hasBMI() ||
3148          (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
3149 }
3150 
3151 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
3152   // Speculate ctlz only if we can directly use LZCNT.
3153   return Subtarget.hasLZCNT();
3154 }
3155 
3156 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
3157   // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
3158   // expensive than a straight movsd. On the other hand, it's important to
3159   // shrink long double fp constant since fldt is very slow.
3160   return !Subtarget.hasSSE2() || VT == MVT::f80;
3161 }
3162 
3163 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
3164   return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
3165          (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
3166 }
3167 
3168 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
3169                                                 const SelectionDAG &DAG,
3170                                                 const MachineMemOperand &MMO) const {
3171   if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
3172       BitcastVT.getVectorElementType() == MVT::i1)
3173     return false;
3174 
3175   if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
3176     return false;
3177 
3178   // If both types are legal vectors, it's always ok to convert them.
3179   if (LoadVT.isVector() && BitcastVT.isVector() &&
3180       isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
3181     return true;
3182 
3183   return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
3184 }
3185 
3186 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
3187                                          const MachineFunction &MF) const {
3188   // Do not merge to float value size (128 bits) if no implicit
3189   // float attribute is set.
3190   bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
3191 
3192   if (NoFloat) {
3193     unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
3194     return (MemVT.getSizeInBits() <= MaxIntSize);
3195   }
3196   // Make sure we don't merge greater than our preferred vector
3197   // width.
3198   if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
3199     return false;
3200 
3201   return true;
3202 }
3203 
3204 bool X86TargetLowering::isCtlzFast() const {
3205   return Subtarget.hasFastLZCNT();
3206 }
3207 
3208 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
3209     const Instruction &AndI) const {
3210   return true;
3211 }
3212 
3213 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
3214   EVT VT = Y.getValueType();
3215 
3216   if (VT.isVector())
3217     return false;
3218 
3219   if (!Subtarget.hasBMI())
3220     return false;
3221 
3222   // There are only 32-bit and 64-bit forms for 'andn'.
3223   if (VT != MVT::i32 && VT != MVT::i64)
3224     return false;
3225 
3226   return !isa<ConstantSDNode>(Y);
3227 }
3228 
3229 bool X86TargetLowering::hasAndNot(SDValue Y) const {
3230   EVT VT = Y.getValueType();
3231 
3232   if (!VT.isVector())
3233     return hasAndNotCompare(Y);
3234 
3235   // Vector.
3236 
3237   if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
3238     return false;
3239 
3240   if (VT == MVT::v4i32)
3241     return true;
3242 
3243   return Subtarget.hasSSE2();
3244 }
3245 
3246 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
3247   return X.getValueType().isScalarInteger(); // 'bt'
3248 }
3249 
3250 bool X86TargetLowering::
3251     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3252         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
3253         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
3254         SelectionDAG &DAG) const {
3255   // Does baseline recommend not to perform the fold by default?
3256   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3257           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
3258     return false;
3259   // For scalars this transform is always beneficial.
3260   if (X.getValueType().isScalarInteger())
3261     return true;
3262   // If all the shift amounts are identical, then transform is beneficial even
3263   // with rudimentary SSE2 shifts.
3264   if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
3265     return true;
3266   // If we have AVX2 with its powerful shift operations, then it's also good.
3267   if (Subtarget.hasAVX2())
3268     return true;
3269   // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
3270   return NewShiftOpcode == ISD::SHL;
3271 }
3272 
3273 unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
3274     EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
3275     const APInt &ShiftOrRotateAmt, const std::optional<APInt> &AndMask) const {
3276   if (!VT.isInteger())
3277     return ShiftOpc;
3278 
3279   bool PreferRotate = false;
3280   if (VT.isVector()) {
3281     // For vectors, if we have rotate instruction support, then it's definitely
3282     // best. Otherwise it's not clear what's best, so just don't make changes.
3283     PreferRotate = Subtarget.hasAVX512() && (VT.getScalarType() == MVT::i32 ||
3284                                              VT.getScalarType() == MVT::i64);
3285   } else {
3286     // For scalars, if we have BMI2 prefer rotate for rorx. Otherwise prefer
3287     // rotate unless we end up with a zext mask + shr pattern.
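         // e.g. for i32 with a shift amount of 24, MaskBits == 8: the shifted
         // value is already an 8-bit zero-extended quantity, so keep the shift.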
3288     PreferRotate = Subtarget.hasBMI2();
3289     if (!PreferRotate) {
3290       unsigned MaskBits =
3291           VT.getScalarSizeInBits() - ShiftOrRotateAmt.getZExtValue();
3292       PreferRotate = (MaskBits != 8) && (MaskBits != 16) && (MaskBits != 32);
3293     }
3294   }
3295 
3296   if (ShiftOpc == ISD::SHL || ShiftOpc == ISD::SRL) {
3297     assert(AndMask.has_value() && "Null andmask when querying about shift+and");
3298 
3299     if (PreferRotate && MayTransformRotate)
3300       return ISD::ROTL;
3301 
3302     // For vectors we don't really get much benefit from swapping around
3303     // constants. Maybe in the future we could check whether the DAG already
3304     // has the flipped node.
3305     if (VT.isVector())
3306       return ShiftOpc;
3307 
3308     // See if it's beneficial to swap the shift type.
3309     if (ShiftOpc == ISD::SHL) {
3310       // If the current setup has imm64 mask, then inverse will have
3311       // at least imm32 mask (or be zext i32 -> i64).
3312       if (VT == MVT::i64)
3313         return AndMask->getSignificantBits() > 32 ? (unsigned)ISD::SRL
3314                                                   : ShiftOpc;
3315 
3316       // We can only benefit if the mask requires at least 7 bits. We don't
3317       // want to replace shl by 1, 2 or 3, as those can be implemented with
3318       // lea/add.
3319       return ShiftOrRotateAmt.uge(7) ? (unsigned)ISD::SRL : ShiftOpc;
3320     }
3321 
3322     if (VT == MVT::i64)
3323       // Keep exactly 32-bit imm64, this is zext i32 -> i64 which is
3324       // extremely efficient.
3325       return AndMask->getSignificantBits() > 33 ? (unsigned)ISD::SHL : ShiftOpc;
3326 
3327     // Keep small shifts as shl so we can generate add/lea.
3328     return ShiftOrRotateAmt.ult(7) ? (unsigned)ISD::SHL : ShiftOpc;
3329   }
3330 
3331   // We prefer rotate for vectors, or if we won't get a zext mask with SRL
3332   // (PreferRotate will be set in the latter case).
3333   if (PreferRotate || VT.isVector())
3334     return ShiftOpc;
3335 
3336   // Non-vector type and we have a zext mask with SRL.
3337   return ISD::SRL;
3338 }
3339 
3340 bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
3341   return N->getOpcode() != ISD::FP_EXTEND;
3342 }
3343 
3344 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
3345     const SDNode *N, CombineLevel Level) const {
3346   assert(((N->getOpcode() == ISD::SHL &&
3347            N->getOperand(0).getOpcode() == ISD::SRL) ||
3348           (N->getOpcode() == ISD::SRL &&
3349            N->getOperand(0).getOpcode() == ISD::SHL)) &&
3350          "Expected shift-shift mask");
3351   // TODO: Should we always create i64 masks? Or only folded immediates?
3352   EVT VT = N->getValueType(0);
3353   if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
3354       (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
3355     // Only fold if the shift values are equal - so it folds to AND.
3356     // TODO - we should fold if either is a non-uniform vector but we don't do
3357     // the fold for non-splats yet.
3358     return N->getOperand(1) == N->getOperand(0).getOperand(1);
3359   }
3360   return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
3361 }
3362 
3363 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
3364   EVT VT = Y.getValueType();
3365 
3366   // For vectors, we don't have a preference, but we probably want a mask.
3367   if (VT.isVector())
3368     return false;
3369 
3370   // 64-bit shifts on 32-bit targets produce really bad bloated code.
3371   if (VT == MVT::i64 && !Subtarget.is64Bit())
3372     return false;
3373 
3374   return true;
3375 }
3376 
3377 TargetLowering::ShiftLegalizationStrategy
3378 X86TargetLowering::preferredShiftLegalizationStrategy(
3379     SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
3380   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
3381       !Subtarget.isOSWindows())
3382     return ShiftLegalizationStrategy::LowerToLibcall;
3383   return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
3384                                                             ExpansionFactor);
3385 }
3386 
3387 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
3388   // Any legal vector type can be splatted more efficiently than
3389   // loading/spilling from memory.
3390   return isTypeLegal(VT);
3391 }
3392 
3393 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
3394   MVT VT = MVT::getIntegerVT(NumBits);
3395   if (isTypeLegal(VT))
3396     return VT;
3397 
3398   // PMOVMSKB can handle this.
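       // (roughly: compare bytewise with PCMPEQB, then PMOVMSKB and test the
       // resulting 16-bit mask against all-ones)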
3399   if (NumBits == 128 && isTypeLegal(MVT::v16i8))
3400     return MVT::v16i8;
3401 
3402   // VPMOVMSKB can handle this.
3403   if (NumBits == 256 && isTypeLegal(MVT::v32i8))
3404     return MVT::v32i8;
3405 
3406   // TODO: Allow 64-bit type for 32-bit target.
3407   // TODO: 512-bit types should be allowed, but make sure that those
3408   // cases are handled in combineVectorSizedSetCCEquality().
3409 
3410   return MVT::INVALID_SIMPLE_VALUE_TYPE;
3411 }
3412 
3413 /// Val is the undef sentinel value or equal to the specified value.
3414 static bool isUndefOrEqual(int Val, int CmpVal) {
3415   return ((Val == SM_SentinelUndef) || (Val == CmpVal));
3416 }
3417 
3418 /// Return true if every element in Mask is the undef sentinel value or equal to
3419 /// the specified value.
3420 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
3421   return llvm::all_of(Mask, [CmpVal](int M) {
3422     return (M == SM_SentinelUndef) || (M == CmpVal);
3423   });
3424 }
3425 
3426 /// Return true if every element in Mask, beginning from position Pos and ending
3427 /// in Pos+Size is the undef sentinel value or equal to the specified value.
3428 static bool isUndefOrEqualInRange(ArrayRef<int> Mask, int CmpVal, unsigned Pos,
3429                                   unsigned Size) {
3430   return llvm::all_of(Mask.slice(Pos, Size),
3431                       [CmpVal](int M) { return isUndefOrEqual(M, CmpVal); });
3432 }
3433 
3434 /// Val is either the undef or zero sentinel value.
3435 static bool isUndefOrZero(int Val) {
3436   return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
3437 }
3438 
3439 /// Return true if every element in Mask, beginning from position Pos and ending
3440 /// in Pos+Size is the undef sentinel value.
3441 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
3442   return llvm::all_of(Mask.slice(Pos, Size),
3443                       [](int M) { return M == SM_SentinelUndef; });
3444 }
3445 
3446 /// Return true if the mask creates a vector whose lower half is undefined.
3447 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
3448   unsigned NumElts = Mask.size();
3449   return isUndefInRange(Mask, 0, NumElts / 2);
3450 }
3451 
3452 /// Return true if the mask creates a vector whose upper half is undefined.
3453 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
3454   unsigned NumElts = Mask.size();
3455   return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
3456 }
3457 
3458 /// Return true if Val falls within the specified range [Low, Hi).
3459 static bool isInRange(int Val, int Low, int Hi) {
3460   return (Val >= Low && Val < Hi);
3461 }
3462 
3463 /// Return true if the value of any element in Mask falls within the specified
3464 /// range [Low, Hi).
3465 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
3466   return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
3467 }
3468 
3469 /// Return true if the value of any element in Mask is the zero sentinel value.
3470 static bool isAnyZero(ArrayRef<int> Mask) {
3471   return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
3472 }
3473 
3474 /// Return true if the value of any element in Mask is the zero or undef
3475 /// sentinel values.
3476 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
3477   return llvm::any_of(Mask, [](int M) {
3478     return M == SM_SentinelZero || M == SM_SentinelUndef;
3479   });
3480 }
3481 
3482 /// Return true if Val is undef or if its value falls within the
3483 /// specified range [Low, Hi).
3484 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3485   return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
3486 }
3487 
3488 /// Return true if every element in Mask is undef or if its value
3489 /// falls within the specified range [Low, Hi).
3490 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3491   return llvm::all_of(
3492       Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
3493 }
3494 
3495 /// Return true if Val is undef, zero or if its value falls within the
3496 /// specified range [Low, Hi).
3497 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
3498   return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
3499 }
3500 
3501 /// Return true if every element in Mask is undef, zero or if its value
3502 /// falls within the specified range [Low, Hi).
3503 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3504   return llvm::all_of(
3505       Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
3506 }
3507 
3508 /// Return true if every element in Mask, beginning
3509 /// from position Pos and ending in Pos + Size, falls within the specified
3510 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
3511 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
3512                                        unsigned Size, int Low, int Step = 1) {
3513   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3514     if (!isUndefOrEqual(Mask[i], Low))
3515       return false;
3516   return true;
3517 }
3518 
3519 /// Return true if every element in Mask, beginning
3520 /// from position Pos and ending in Pos+Size, falls within the specified
3521 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or zero.
3522 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3523                                              unsigned Size, int Low,
3524                                              int Step = 1) {
3525   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3526     if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
3527       return false;
3528   return true;
3529 }
3530 
3531 /// Return true if every element in Mask, beginning
3532 /// from position Pos and ending in Pos+Size is undef or is zero.
3533 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3534                                  unsigned Size) {
3535   return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
3536 }
3537 
3538 /// Helper function to test whether a shuffle mask could be
3539 /// simplified by widening the elements being shuffled.
3540 ///
3541 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
3542 /// leaves it in an unspecified state.
3543 ///
3544 /// NOTE: This must handle normal vector shuffle masks and *target* vector
3545 /// shuffle masks. The latter have the special property of a '-2' representing
3546 /// a zeroed lane of a vector.
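     ///
     /// Illustrative examples (each adjacent pair of mask elements must map to a
     /// single wide element):
     ///   <0, 1, 6, 7>  --> <0, 3>   (adjacent, even-aligned pairs)
     ///   <-1, 1, 2, 3> --> <0, 1>   (undef paired with a properly aligned element)
     ///   <0, 2, 4, 6>  --> fails    (pairs are not adjacent)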
3547 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3548                                     SmallVectorImpl<int> &WidenedMask) {
3549   WidenedMask.assign(Mask.size() / 2, 0);
3550   for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
3551     int M0 = Mask[i];
3552     int M1 = Mask[i + 1];
3553 
3554     // If both elements are undef, its trivial.
3555     // If both elements are undef, it's trivial.
3556       WidenedMask[i / 2] = SM_SentinelUndef;
3557       continue;
3558     }
3559 
3560     // Check for an undef mask and a mask value properly aligned to fit with
3561     // a pair of values. If we find such a case, use the non-undef mask's value.
3562     if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
3563       WidenedMask[i / 2] = M1 / 2;
3564       continue;
3565     }
3566     if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
3567       WidenedMask[i / 2] = M0 / 2;
3568       continue;
3569     }
3570 
3571     // When zeroing, we need to spread the zeroing across both lanes to widen.
3572     if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
3573       if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
3574           (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
3575         WidenedMask[i / 2] = SM_SentinelZero;
3576         continue;
3577       }
3578       return false;
3579     }
3580 
3581     // Finally check if the two mask values are adjacent and aligned with
3582     // a pair.
3583     if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
3584       WidenedMask[i / 2] = M0 / 2;
3585       continue;
3586     }
3587 
3588     // Otherwise we can't safely widen the elements used in this shuffle.
3589     return false;
3590   }
3591   assert(WidenedMask.size() == Mask.size() / 2 &&
3592          "Incorrect size of mask after widening the elements!");
3593 
3594   return true;
3595 }
3596 
3597 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3598                                     const APInt &Zeroable,
3599                                     bool V2IsZero,
3600                                     SmallVectorImpl<int> &WidenedMask) {
3601   // Create an alternative mask with info about zeroable elements.
3602   // Here we do not set undef elements as zeroable.
3603   SmallVector<int, 64> ZeroableMask(Mask);
3604   if (V2IsZero) {
3605     assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
3606     for (int i = 0, Size = Mask.size(); i != Size; ++i)
3607       if (Mask[i] != SM_SentinelUndef && Zeroable[i])
3608         ZeroableMask[i] = SM_SentinelZero;
3609   }
3610   return canWidenShuffleElements(ZeroableMask, WidenedMask);
3611 }
3612 
3613 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
3614   SmallVector<int, 32> WidenedMask;
3615   return canWidenShuffleElements(Mask, WidenedMask);
3616 }
3617 
3618 // Attempt to narrow/widen shuffle mask until it matches the target number of
3619 // elements.
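     // For example, the 2-element mask <0, 3> scaled up to 4 elements becomes
     // <0, 1, 6, 7>, and <0, 1, 6, 7> scaled back down to 2 elements becomes
     // <0, 3>. Scaling fails if any widening step can't pair the elements.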
3620 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
3621                                  SmallVectorImpl<int> &ScaledMask) {
3622   unsigned NumSrcElts = Mask.size();
3623   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
3624          "Illegal shuffle scale factor");
3625 
3626   // Narrowing is guaranteed to work.
3627   if (NumDstElts >= NumSrcElts) {
3628     int Scale = NumDstElts / NumSrcElts;
3629     llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
3630     return true;
3631   }
3632 
3633   // We have to repeat the widening until we reach the target size, but we can
3634   // split out the first widening as it sets up ScaledMask for us.
3635   if (canWidenShuffleElements(Mask, ScaledMask)) {
3636     while (ScaledMask.size() > NumDstElts) {
3637       SmallVector<int, 16> WidenedMask;
3638       if (!canWidenShuffleElements(ScaledMask, WidenedMask))
3639         return false;
3640       ScaledMask = std::move(WidenedMask);
3641     }
3642     return true;
3643   }
3644 
3645   return false;
3646 }
3647 
3648 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
3649 bool X86::isZeroNode(SDValue Elt) {
3650   return isNullConstant(Elt) || isNullFPConstant(Elt);
3651 }
3652 
3653 // Build a vector of constants.
3654 // Use an UNDEF node if IsMask is set and the element value is negative.
3655 // Split 64-bit constants into 32-bit halves in 32-bit mode.
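     // For example, on a 32-bit target a v2i64 request for values <1, 3> is built
     // as the v4i32 vector <1, 0, 3, 0> and then bitcast back to v2i64.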
3656 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
3657                               const SDLoc &dl, bool IsMask = false) {
3658 
3659   SmallVector<SDValue, 32>  Ops;
3660   bool Split = false;
3661 
3662   MVT ConstVecVT = VT;
3663   unsigned NumElts = VT.getVectorNumElements();
3664   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3665   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3666     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3667     Split = true;
3668   }
3669 
3670   MVT EltVT = ConstVecVT.getVectorElementType();
3671   for (unsigned i = 0; i < NumElts; ++i) {
3672     bool IsUndef = Values[i] < 0 && IsMask;
3673     SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
3674       DAG.getConstant(Values[i], dl, EltVT);
3675     Ops.push_back(OpNode);
3676     if (Split)
3677       Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
3678                     DAG.getConstant(0, dl, EltVT));
3679   }
3680   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3681   if (Split)
3682     ConstsNode = DAG.getBitcast(VT, ConstsNode);
3683   return ConstsNode;
3684 }
3685 
3686 static SDValue getConstVector(ArrayRef<APInt> Bits, const APInt &Undefs,
3687                               MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
3688   assert(Bits.size() == Undefs.getBitWidth() &&
3689          "Unequal constant and undef arrays");
3690   SmallVector<SDValue, 32> Ops;
3691   bool Split = false;
3692 
3693   MVT ConstVecVT = VT;
3694   unsigned NumElts = VT.getVectorNumElements();
3695   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3696   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3697     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3698     Split = true;
3699   }
3700 
3701   MVT EltVT = ConstVecVT.getVectorElementType();
3702   for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
3703     if (Undefs[i]) {
3704       Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
3705       continue;
3706     }
3707     const APInt &V = Bits[i];
3708     assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
3709     if (Split) {
3710       Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
3711       Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
3712     } else if (EltVT == MVT::f32) {
3713       APFloat FV(APFloat::IEEEsingle(), V);
3714       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3715     } else if (EltVT == MVT::f64) {
3716       APFloat FV(APFloat::IEEEdouble(), V);
3717       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3718     } else {
3719       Ops.push_back(DAG.getConstant(V, dl, EltVT));
3720     }
3721   }
3722 
3723   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3724   return DAG.getBitcast(VT, ConstsNode);
3725 }
3726 
3727 static SDValue getConstVector(ArrayRef<APInt> Bits, MVT VT,
3728                               SelectionDAG &DAG, const SDLoc &dl) {
3729   APInt Undefs = APInt::getZero(Bits.size());
3730   return getConstVector(Bits, Undefs, VT, DAG, dl);
3731 }
3732 
3733 /// Returns a vector of the specified type with all zero elements.
3734 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
3735                              SelectionDAG &DAG, const SDLoc &dl) {
3736   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
3737           VT.getVectorElementType() == MVT::i1) &&
3738          "Unexpected vector type");
3739 
3740   // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
3741   // type. This ensures they get CSE'd. But if the integer type is not
3742   // available, use a floating-point +0.0 instead.
3743   SDValue Vec;
3744   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3745   if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
3746     Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
3747   } else if (VT.isFloatingPoint() &&
3748              TLI.isTypeLegal(VT.getVectorElementType())) {
3749     Vec = DAG.getConstantFP(+0.0, dl, VT);
3750   } else if (VT.getVectorElementType() == MVT::i1) {
3751     assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
3752            "Unexpected vector type");
3753     Vec = DAG.getConstant(0, dl, VT);
3754   } else {
3755     unsigned Num32BitElts = VT.getSizeInBits() / 32;
3756     Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
3757   }
3758   return DAG.getBitcast(VT, Vec);
3759 }
3760 
3761 // Helper to determine if the ops are all extracted subvectors of a single
3762 // source. If commuting is allowed, they don't have to be in (Lo/Hi) order.
3763 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
3764   if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3765       RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3766       LHS.getValueType() != RHS.getValueType() ||
3767       LHS.getOperand(0) != RHS.getOperand(0))
3768     return SDValue();
3769 
3770   SDValue Src = LHS.getOperand(0);
3771   if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
3772     return SDValue();
3773 
3774   unsigned NumElts = LHS.getValueType().getVectorNumElements();
3775   if ((LHS.getConstantOperandAPInt(1) == 0 &&
3776        RHS.getConstantOperandAPInt(1) == NumElts) ||
3777       (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
3778        LHS.getConstantOperandAPInt(1) == NumElts))
3779     return Src;
3780 
3781   return SDValue();
3782 }
3783 
3784 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
3785                                 const SDLoc &dl, unsigned vectorWidth) {
3786   EVT VT = Vec.getValueType();
3787   EVT ElVT = VT.getVectorElementType();
3788   unsigned Factor = VT.getSizeInBits() / vectorWidth;
3789   EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
3790                                   VT.getVectorNumElements() / Factor);
3791 
3792   // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
3793   unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
3794   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3795 
3796   // This is the index of the first element of the vectorWidth-bit chunk
3797   // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits.
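       // e.g. for a v8i32 source split into 128-bit chunks, ElemsPerChunk is 4,
       // so an IdxVal of 5 is rounded down to 4 (the start of the upper chunk).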
3798   IdxVal &= ~(ElemsPerChunk - 1);
3799 
3800   // If the input is a buildvector just emit a smaller one.
3801   if (Vec.getOpcode() == ISD::BUILD_VECTOR)
3802     return DAG.getBuildVector(ResultVT, dl,
3803                               Vec->ops().slice(IdxVal, ElemsPerChunk));
3804 
3805   // Check if we're extracting the upper undef of a widening pattern.
3806   if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR && Vec.getOperand(0).isUndef() &&
3807       Vec.getOperand(1).getValueType().getVectorNumElements() <= IdxVal &&
3808       isNullConstant(Vec.getOperand(2)))
3809     return DAG.getUNDEF(ResultVT);
3810 
3811   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3812   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
3813 }
3814 
3815 /// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
3816 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
3817 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
3818 /// instructions or a simple subregister reference. Idx is an index in the
3819 /// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
3820 /// lowering EXTRACT_VECTOR_ELT operations easier.
3821 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
3822                                    SelectionDAG &DAG, const SDLoc &dl) {
3823   assert((Vec.getValueType().is256BitVector() ||
3824           Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
3825   return extractSubVector(Vec, IdxVal, DAG, dl, 128);
3826 }
3827 
3828 /// Generate a DAG to grab 256-bits from a 512-bit vector.
3829 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
3830                                    SelectionDAG &DAG, const SDLoc &dl) {
3831   assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
3832   return extractSubVector(Vec, IdxVal, DAG, dl, 256);
3833 }
3834 
3835 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3836                                SelectionDAG &DAG, const SDLoc &dl,
3837                                unsigned vectorWidth) {
3838   assert((vectorWidth == 128 || vectorWidth == 256) &&
3839          "Unsupported vector width");
3840   // Inserting UNDEF just returns Result unchanged.
3841   if (Vec.isUndef())
3842     return Result;
3843   EVT VT = Vec.getValueType();
3844   EVT ElVT = VT.getVectorElementType();
3845   EVT ResultVT = Result.getValueType();
3846 
3847   // Insert the relevant vectorWidth bits.
3848   unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
3849   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3850 
3851   // This is the index of the first element of the vectorWidth-bit chunk
3852   // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits.
3853   IdxVal &= ~(ElemsPerChunk - 1);
3854 
3855   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3856   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
3857 }
3858 
3859 /// Generate a DAG to put 128-bits into a vector > 128 bits.  This
3860 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
3861 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
3862 /// simple superregister reference.  Idx is an index in the 128 bits
3863 /// we want.  It need not be aligned to a 128-bit boundary.  That makes
3864 /// lowering INSERT_VECTOR_ELT operations easier.
3865 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3866                                   SelectionDAG &DAG, const SDLoc &dl) {
3867   assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
3868   return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
3869 }
3870 
3871 /// Widen a vector to a larger size with the same scalar type, with the new
3872 /// elements either zero or undef.
3873 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
3874                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
3875                               const SDLoc &dl) {
3876   assert(Vec.getValueSizeInBits().getFixedValue() <= VT.getFixedSizeInBits() &&
3877          Vec.getValueType().getScalarType() == VT.getScalarType() &&
3878          "Unsupported vector widening type");
3879   SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
3880                                 : DAG.getUNDEF(VT);
3881   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
3882                      DAG.getIntPtrConstant(0, dl));
3883 }
3884 
3885 /// Widen a vector to a larger size with the same scalar type, with the new
3886 /// elements either zero or undef.
3887 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
3888                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
3889                               const SDLoc &dl, unsigned WideSizeInBits) {
3890   assert(Vec.getValueSizeInBits() <= WideSizeInBits &&
3891          (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
3892          "Unsupported vector widening type");
3893   unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
3894   MVT SVT = Vec.getSimpleValueType().getScalarType();
3895   MVT VT = MVT::getVectorVT(SVT, WideNumElts);
3896   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3897 }
3898 
3899 /// Widen a mask vector type to a minimum of v8i1/v16i1 to allow use of KSHIFT
3900 /// and bitcast with integer types.
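     /// For example, v4i1 widens to v8i1 when DQI is available and to v16i1
     /// otherwise; v8i1 without DQI also widens to v16i1.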
3901 static MVT widenMaskVectorType(MVT VT, const X86Subtarget &Subtarget) {
3902   assert(VT.getVectorElementType() == MVT::i1 && "Expected bool vector");
3903   unsigned NumElts = VT.getVectorNumElements();
3904   if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
3905     return Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
3906   return VT;
3907 }
3908 
3909 /// Widen a mask vector to a minimum of v8i1/v16i1 to allow use of KSHIFT and
3910 /// bitcast with integer types.
3911 static SDValue widenMaskVector(SDValue Vec, bool ZeroNewElements,
3912                                const X86Subtarget &Subtarget, SelectionDAG &DAG,
3913                                const SDLoc &dl) {
3914   MVT VT = widenMaskVectorType(Vec.getSimpleValueType(), Subtarget);
3915   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3916 }
3917 
3918 // Helper function to collect subvector ops that are concatenated together,
3919 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
3920 // The subvectors in Ops are guaranteed to be the same type.
3921 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
3922                              SelectionDAG &DAG) {
3923   assert(Ops.empty() && "Expected an empty ops vector");
3924 
3925   if (N->getOpcode() == ISD::CONCAT_VECTORS) {
3926     Ops.append(N->op_begin(), N->op_end());
3927     return true;
3928   }
3929 
3930   if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
3931     SDValue Src = N->getOperand(0);
3932     SDValue Sub = N->getOperand(1);
3933     const APInt &Idx = N->getConstantOperandAPInt(2);
3934     EVT VT = Src.getValueType();
3935     EVT SubVT = Sub.getValueType();
3936 
3937     // TODO - Handle more general insert_subvector chains.
3938     if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
3939       // insert_subvector(undef, x, lo)
3940       if (Idx == 0 && Src.isUndef()) {
3941         Ops.push_back(Sub);
3942         Ops.push_back(DAG.getUNDEF(SubVT));
3943         return true;
3944       }
3945       if (Idx == (VT.getVectorNumElements() / 2)) {
3946         // insert_subvector(insert_subvector(undef, x, lo), y, hi)
3947         if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
3948             Src.getOperand(1).getValueType() == SubVT &&
3949             isNullConstant(Src.getOperand(2))) {
3950           Ops.push_back(Src.getOperand(1));
3951           Ops.push_back(Sub);
3952           return true;
3953         }
3954         // insert_subvector(x, extract_subvector(x, lo), hi)
3955         if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
3956             Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
3957           Ops.append(2, Sub);
3958           return true;
3959         }
3960         // insert_subvector(undef, x, hi)
3961         if (Src.isUndef()) {
3962           Ops.push_back(DAG.getUNDEF(SubVT));
3963           Ops.push_back(Sub);
3964           return true;
3965         }
3966       }
3967     }
3968   }
3969 
3970   return false;
3971 }
3972 
3973 // Helper to check if \p V can be split into subvectors with the upper
3974 // subvectors all undef, in which case return the lower half of \p V.
3975 static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
3976                                      SelectionDAG &DAG) {
3977   SmallVector<SDValue> SubOps;
3978   if (!collectConcatOps(V.getNode(), SubOps, DAG))
3979     return SDValue();
3980 
3981   unsigned NumSubOps = SubOps.size();
3982   unsigned HalfNumSubOps = NumSubOps / 2;
3983   assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
3984 
3985   ArrayRef<SDValue> UpperOps(SubOps.begin() + HalfNumSubOps, SubOps.end());
3986   if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
3987     return SDValue();
3988 
3989   EVT HalfVT = V.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
3990   ArrayRef<SDValue> LowerOps(SubOps.begin(), SubOps.begin() + HalfNumSubOps);
3991   return DAG.getNode(ISD::CONCAT_VECTORS, DL, HalfVT, LowerOps);
3992 }
3993 
3994 // Helper to check if we can access all the constituent subvectors without any
3995 // extract ops.
3996 static bool isFreeToSplitVector(SDNode *N, SelectionDAG &DAG) {
3997   SmallVector<SDValue> Ops;
3998   return collectConcatOps(N, Ops, DAG);
3999 }
4000 
4001 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
4002                                                const SDLoc &dl) {
4003   EVT VT = Op.getValueType();
4004   unsigned NumElems = VT.getVectorNumElements();
4005   unsigned SizeInBits = VT.getSizeInBits();
4006   assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
4007          "Can't split odd sized vector");
4008 
4009   // If this is a splat value (with no undefs) then reuse the lower subvector
4010   // for both halves, which should be a free extraction.
4011   SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
4012   if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
4013     return std::make_pair(Lo, Lo);
4014 
4015   SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
4016   return std::make_pair(Lo, Hi);
4017 }
4018 
4019 /// Break an operation into 2 half sized ops and then concatenate the results.
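     /// For example, a v32i8 operation can be performed as two v16i8 operations
     /// whose results are concatenated back to v32i8.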
4020 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
4021   unsigned NumOps = Op.getNumOperands();
4022   EVT VT = Op.getValueType();
4023   SDLoc dl(Op);
4024 
4025   // Split each vector operand into Lo/Hi halves; scalar operands are reused.
4026   SmallVector<SDValue> LoOps(NumOps, SDValue());
4027   SmallVector<SDValue> HiOps(NumOps, SDValue());
4028   for (unsigned I = 0; I != NumOps; ++I) {
4029     SDValue SrcOp = Op.getOperand(I);
4030     if (!SrcOp.getValueType().isVector()) {
4031       LoOps[I] = HiOps[I] = SrcOp;
4032       continue;
4033     }
4034     std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
4035   }
4036 
4037   EVT LoVT, HiVT;
4038   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
4039   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
4040                      DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
4041                      DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
4042 }
4043 
4044 /// Break a unary integer operation into 2 half sized ops and then
4045 /// concatenate the results back.
4046 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
4047   // Make sure we only try to split 256/512-bit types to avoid creating
4048   // narrow vectors.
4049   EVT VT = Op.getValueType();
4050   (void)VT;
4051   assert((Op.getOperand(0).getValueType().is256BitVector() ||
4052           Op.getOperand(0).getValueType().is512BitVector()) &&
4053          (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4054   assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
4055              VT.getVectorNumElements() &&
4056          "Unexpected VTs!");
4057   return splitVectorOp(Op, DAG);
4058 }
4059 
4060 /// Break a binary integer operation into 2 half sized ops and then
4061 /// concatenate the results back.
4062 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
4063   // Assert that all the types match.
4064   EVT VT = Op.getValueType();
4065   (void)VT;
4066   assert(Op.getOperand(0).getValueType() == VT &&
4067          Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
4068   assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4069   return splitVectorOp(Op, DAG);
4070 }
4071 
4072 // Helper for splitting the operands of an operation into legal target-sized
4073 // parts and applying a function to each part.
4074 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
4075 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
4076 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
4077 // The argument Builder is a function that will be applied on each split part:
4078 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
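     // For example, with VT = MVT::v64i8 on an AVX2 target without BWI, the ops
     // are split into two v32i8 halves, Builder is invoked once per half, and the
     // results are concatenated back together.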
4079 template <typename F>
4080 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4081                          const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
4082                          F Builder, bool CheckBWI = true) {
4083   assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
4084   unsigned NumSubs = 1;
4085   if ((CheckBWI && Subtarget.useBWIRegs()) ||
4086       (!CheckBWI && Subtarget.useAVX512Regs())) {
4087     if (VT.getSizeInBits() > 512) {
4088       NumSubs = VT.getSizeInBits() / 512;
4089       assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
4090     }
4091   } else if (Subtarget.hasAVX2()) {
4092     if (VT.getSizeInBits() > 256) {
4093       NumSubs = VT.getSizeInBits() / 256;
4094       assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
4095     }
4096   } else {
4097     if (VT.getSizeInBits() > 128) {
4098       NumSubs = VT.getSizeInBits() / 128;
4099       assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
4100     }
4101   }
4102 
4103   if (NumSubs == 1)
4104     return Builder(DAG, DL, Ops);
4105 
4106   SmallVector<SDValue, 4> Subs;
4107   for (unsigned i = 0; i != NumSubs; ++i) {
4108     SmallVector<SDValue, 2> SubOps;
4109     for (SDValue Op : Ops) {
4110       EVT OpVT = Op.getValueType();
4111       unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
4112       unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
4113       SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
4114     }
4115     Subs.push_back(Builder(DAG, DL, SubOps));
4116   }
4117   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
4118 }
4119 
4120 // Helper function that extends a non-512-bit vector op to 512 bits on non-VLX
4121 // targets.
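     // For example, a v4i32 node on an AVX512F target without VLX is widened to
     // v16i32, the 512-bit node is created, and the low v4i32 subvector is
     // extracted as the result.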
4122 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
4123                              ArrayRef<SDValue> Ops, SelectionDAG &DAG,
4124                              const X86Subtarget &Subtarget) {
4125   assert(Subtarget.hasAVX512() && "AVX512 target expected");
4126   MVT SVT = VT.getScalarType();
4127 
4128   // If we have a 32/64-bit splatted constant, splat it to DstTy to
4129   // encourage a foldable broadcasted operand.
4130   auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
4131     unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
4132     // AVX512 broadcasts 32/64-bit operands.
4133     // TODO: Support float once getAVX512Node is used by fp-ops.
4134     if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
4135         !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
4136       return SDValue();
4137     // If we're not widening, don't bother if we're not bitcasting.
4138     if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
4139       return SDValue();
4140     if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
4141       APInt SplatValue, SplatUndef;
4142       unsigned SplatBitSize;
4143       bool HasAnyUndefs;
4144       if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
4145                               HasAnyUndefs, OpEltSizeInBits) &&
4146           !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
4147         return DAG.getConstant(SplatValue, DL, DstVT);
4148     }
4149     return SDValue();
4150   };
4151 
4152   bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
4153 
4154   MVT DstVT = VT;
4155   if (Widen)
4156     DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
4157 
4158   // Canonicalize src operands.
4159   SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
4160   for (SDValue &Op : SrcOps) {
4161     MVT OpVT = Op.getSimpleValueType();
4162     // Just pass through scalar operands.
4163     if (!OpVT.isVector())
4164       continue;
4165     assert(OpVT == VT && "Vector type mismatch");
4166 
4167     if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
4168       Op = BroadcastOp;
4169       continue;
4170     }
4171 
4172     // Just widen the subvector by inserting into an undef wide vector.
4173     if (Widen)
4174       Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
4175   }
4176 
4177   SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
4178 
4179   // Perform the 512-bit op then extract the bottom subvector.
4180   if (Widen)
4181     Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
4182   return Res;
4183 }
4184 
4185 /// Insert an i1 subvector into an i1 vector.
4186 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
4187                                 const X86Subtarget &Subtarget) {
4188 
4189   SDLoc dl(Op);
4190   SDValue Vec = Op.getOperand(0);
4191   SDValue SubVec = Op.getOperand(1);
4192   SDValue Idx = Op.getOperand(2);
4193   unsigned IdxVal = Op.getConstantOperandVal(2);
4194 
4195   // Inserting undef is a nop. We can just return the original vector.
4196   if (SubVec.isUndef())
4197     return Vec;
4198 
4199   if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
4200     return Op;
4201 
4202   MVT OpVT = Op.getSimpleValueType();
4203   unsigned NumElems = OpVT.getVectorNumElements();
4204   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
4205 
4206   // Extend to natively supported kshift.
4207   MVT WideOpVT = widenMaskVectorType(OpVT, Subtarget);
4208 
4209   // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
4210   // if necessary.
4211   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
4212     // May need to promote to a legal type.
4213     Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4214                      DAG.getConstant(0, dl, WideOpVT),
4215                      SubVec, Idx);
4216     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4217   }
4218 
4219   MVT SubVecVT = SubVec.getSimpleValueType();
4220   unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
4221   assert(IdxVal + SubVecNumElems <= NumElems &&
4222          IdxVal % SubVecVT.getSizeInBits() == 0 &&
4223          "Unexpected index value in INSERT_SUBVECTOR");
4224 
4225   SDValue Undef = DAG.getUNDEF(WideOpVT);
4226 
4227   if (IdxVal == 0) {
4228     // Zero the lower bits of Vec.
4229     SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
4230     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
4231                       ZeroIdx);
4232     Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4233     Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4234     // Merge them together, SubVec should be zero extended.
4235     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4236                          DAG.getConstant(0, dl, WideOpVT),
4237                          SubVec, ZeroIdx);
4238     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4239     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4240   }
4241 
4242   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4243                        Undef, SubVec, ZeroIdx);
4244 
4245   if (Vec.isUndef()) {
4246     assert(IdxVal != 0 && "Unexpected index");
4247     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4248                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4249     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4250   }
4251 
4252   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
4253     assert(IdxVal != 0 && "Unexpected index");
4254     // If upper elements of Vec are known undef, then just shift into place.
4255     if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
4256                      [](SDValue V) { return V.isUndef(); })) {
4257       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4258                            DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4259     } else {
4260       NumElems = WideOpVT.getVectorNumElements();
4261       unsigned ShiftLeft = NumElems - SubVecNumElems;
4262       unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4263       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4264                            DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4265       if (ShiftRight != 0)
4266         SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4267                              DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4268     }
4269     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4270   }
4271 
4272   // Simple case when we put the subvector in the upper part.
4273   if (IdxVal + SubVecNumElems == NumElems) {
4274     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4275                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4276     if (SubVecNumElems * 2 == NumElems) {
4277       // Special case, use legal zero extending insert_subvector. This allows
4278       // isel to optimize when bits are known zero.
4279       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
4280       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4281                         DAG.getConstant(0, dl, WideOpVT),
4282                         Vec, ZeroIdx);
4283     } else {
4284       // Otherwise use explicit shifts to zero the bits.
4285       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4286                         Undef, Vec, ZeroIdx);
4287       NumElems = WideOpVT.getVectorNumElements();
4288       SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
4289       Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4290       Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4291     }
4292     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4293     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4294   }
4295 
4296   // Inserting into the middle is more complicated.
4297 
4298   NumElems = WideOpVT.getVectorNumElements();
4299 
4300   // Widen the vector if needed.
4301   Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
4302 
4303   unsigned ShiftLeft = NumElems - SubVecNumElems;
4304   unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
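       // e.g. inserting a v4i1 subvector at index 4 of a v16i1 widened vector
       // gives ShiftLeft = 12 and ShiftRight = 8, placing the subvector bits at
       // positions [4, 8).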
4305 
4306   // Do an optimization for the most frequently used types.
4307   if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
4308     APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
4309     Mask0.flipAllBits();
4310     SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
4311     SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
4312     Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
4313     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4314                          DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4315     SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4316                          DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4317     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4318 
4319     // Reduce to original width if needed.
4320     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4321   }
4322 
4323   // Clear the upper bits of the subvector and move it to its insert position.
4324   SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4325                        DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4326   SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4327                        DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4328 
4329   // Isolate the bits below the insertion point.
4330   unsigned LowShift = NumElems - IdxVal;
4331   SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
4332                             DAG.getTargetConstant(LowShift, dl, MVT::i8));
4333   Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
4334                     DAG.getTargetConstant(LowShift, dl, MVT::i8));
4335 
4336   // Isolate the bits after the last inserted bit.
4337   unsigned HighShift = IdxVal + SubVecNumElems;
4338   SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
4339                             DAG.getTargetConstant(HighShift, dl, MVT::i8));
4340   High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
4341                     DAG.getTargetConstant(HighShift, dl, MVT::i8));
4342 
4343   // Now OR all 3 pieces together.
4344   Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
4345   SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
4346 
4347   // Reduce to original width if needed.
4348   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4349 }
4350 
4351 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
4352                                 const SDLoc &dl) {
4353   assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
4354   EVT SubVT = V1.getValueType();
4355   EVT SubSVT = SubVT.getScalarType();
4356   unsigned SubNumElts = SubVT.getVectorNumElements();
4357   unsigned SubVectorWidth = SubVT.getSizeInBits();
4358   EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
4359   SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
4360   return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
4361 }
4362 
4363 /// Returns a vector of the specified type with all bits set.
4364 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
4365 /// Then bitcast to their original type, ensuring they get CSE'd.
4366 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4367   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
4368          "Expected a 128/256/512-bit vector type");
4369 
4370   APInt Ones = APInt::getAllOnes(32);
4371   unsigned NumElts = VT.getSizeInBits() / 32;
4372   SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
4373   return DAG.getBitcast(VT, Vec);
4374 }
4375 
4376 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
4377                                       SDValue In, SelectionDAG &DAG) {
4378   EVT InVT = In.getValueType();
4379   assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
4380   assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
4381           ISD::ZERO_EXTEND == Opcode) &&
4382          "Unknown extension opcode");
4383 
4384   // For 256-bit vectors, we only need the lower (128-bit) input half.
4385   // For 512-bit vectors, we only need the lower input half or quarter.
4386   if (InVT.getSizeInBits() > 128) {
4387     assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
4388            "Expected VTs to be the same size!");
4389     unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
4390     In = extractSubVector(In, 0, DAG, DL,
4391                           std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
4392     InVT = In.getValueType();
4393   }
4394 
4395   if (VT.getVectorNumElements() != InVT.getVectorNumElements())
4396     Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
4397 
4398   return DAG.getNode(Opcode, DL, VT, In);
4399 }
4400 
4401 // Create OR(AND(LHS,MASK),AND(RHS,~MASK)) bit select pattern
4402 static SDValue getBitSelect(const SDLoc &DL, MVT VT, SDValue LHS, SDValue RHS,
4403                             SDValue Mask, SelectionDAG &DAG) {
4404   LHS = DAG.getNode(ISD::AND, DL, VT, LHS, Mask);
4405   RHS = DAG.getNode(X86ISD::ANDNP, DL, VT, Mask, RHS);
4406   return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
4407 }
4408 
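     /// Create a standard unpack shuffle mask, e.g. for v8i16:
     ///   Lo, non-unary: <0, 8, 1, 9, 2, 10, 3, 11>
     ///   Hi, non-unary: <4, 12, 5, 13, 6, 14, 7, 15>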
4409 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
4410                                    bool Lo, bool Unary) {
4411   assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
4412          "Illegal vector type to unpack");
4413   assert(Mask.empty() && "Expected an empty shuffle mask vector");
4414   int NumElts = VT.getVectorNumElements();
4415   int NumEltsInLane = 128 / VT.getScalarSizeInBits();
4416   for (int i = 0; i < NumElts; ++i) {
4417     unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
4418     int Pos = (i % NumEltsInLane) / 2 + LaneStart;
4419     Pos += (Unary ? 0 : NumElts * (i % 2));
4420     Pos += (Lo ? 0 : NumEltsInLane / 2);
4421     Mask.push_back(Pos);
4422   }
4423 }
4424 
4425 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
4426 /// imposed by AVX and specific to the unary pattern. Example:
4427 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
4428 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
4429 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
4430                                    bool Lo) {
4431   assert(Mask.empty() && "Expected an empty shuffle mask vector");
4432   int NumElts = VT.getVectorNumElements();
4433   for (int i = 0; i < NumElts; ++i) {
4434     int Pos = i / 2;
4435     Pos += (Lo ? 0 : NumElts / 2);
4436     Mask.push_back(Pos);
4437   }
4438 }
4439 
4440 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
4441 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
4442                                 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
4443   if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
4444       (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
4445     SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
4446     for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
4447       int M = Mask[I];
4448       if (M < 0)
4449         continue;
4450       SDValue V = (M < NumElts) ? V1 : V2;
4451       if (V.isUndef())
4452         continue;
4453       Ops[I] = V.getOperand(M % NumElts);
4454     }
4455     return DAG.getBuildVector(VT, dl, Ops);
4456   }
4457 
4458   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4459 }
4460 
4461 /// Returns a vector_shuffle node for an unpackl operation.
4462 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4463                           SDValue V1, SDValue V2) {
4464   SmallVector<int, 8> Mask;
4465   createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
4466   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4467 }
4468 
4469 /// Returns a vector_shuffle node for an unpackh operation.
4470 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4471                           SDValue V1, SDValue V2) {
4472   SmallVector<int, 8> Mask;
4473   createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
4474   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4475 }
4476 
4477 /// Returns a node that packs the LHS + RHS nodes together at half width.
4478 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
4479 /// TODO: Add subvector splitting if/when we have a need for it.
4480 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4481                        const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
4482                        bool PackHiHalf = false) {
4483   MVT OpVT = LHS.getSimpleValueType();
4484   unsigned EltSizeInBits = VT.getScalarSizeInBits();
4485   bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
4486   assert(OpVT == RHS.getSimpleValueType() &&
4487          VT.getSizeInBits() == OpVT.getSizeInBits() &&
4488          (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
4489          "Unexpected PACK operand types");
4490   assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
4491          "Unexpected PACK result type");
4492 
4493   // Rely on vector shuffles for vXi64 -> vXi32 packing.
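       // e.g. packing two v4i64 ops into a v8i32 result with PackHiHalf = false
       // uses the shuffle mask <0, 2, 8, 10, 4, 6, 12, 14> on the bitcast inputs.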
4494   if (EltSizeInBits == 32) {
4495     SmallVector<int> PackMask;
4496     int Offset = PackHiHalf ? 1 : 0;
4497     int NumElts = VT.getVectorNumElements();
4498     for (int I = 0; I != NumElts; I += 4) {
4499       PackMask.push_back(I + Offset);
4500       PackMask.push_back(I + Offset + 2);
4501       PackMask.push_back(I + Offset + NumElts);
4502       PackMask.push_back(I + Offset + NumElts + 2);
4503     }
4504     return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
4505                                 DAG.getBitcast(VT, RHS), PackMask);
4506   }
4507 
4508   // See if we already have sufficient leading bits for PACKSS/PACKUS.
4509   if (!PackHiHalf) {
4510     if (UsePackUS &&
4511         DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
4512         DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
4513       return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4514 
4515     if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
4516         DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
4517       return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4518   }
4519 
4520   // Fallback to sign/zero extending the requested half and pack.
4521   SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
4522   if (UsePackUS) {
4523     if (PackHiHalf) {
4524       LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
4525       RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
4526     } else {
4527       SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
4528       LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
4529       RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
4530     }
4531     return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4532   }
4533 
4534   if (!PackHiHalf) {
4535     LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
4536     RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
4537   }
4538   LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
4539   RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
4540   return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4541 }
4542 
4543 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
4544 /// This produces a shuffle where the low element of V2 is swizzled into the
4545 /// zero/undef vector, landing at element Idx.
4546 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4547 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
4548                                            bool IsZero,
4549                                            const X86Subtarget &Subtarget,
4550                                            SelectionDAG &DAG) {
4551   MVT VT = V2.getSimpleValueType();
4552   SDValue V1 = IsZero
4553     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4554   int NumElems = VT.getVectorNumElements();
4555   SmallVector<int, 16> MaskVec(NumElems);
4556   for (int i = 0; i != NumElems; ++i)
4557     // If this is the insertion idx, put the low elt of V2 here.
4558     MaskVec[i] = (i == Idx) ? NumElems : i;
4559   return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
4560 }
4561 
4562 static ConstantPoolSDNode *getTargetConstantPoolFromBasePtr(SDValue Ptr) {
4563   if (Ptr.getOpcode() == X86ISD::Wrapper ||
4564       Ptr.getOpcode() == X86ISD::WrapperRIP)
4565     Ptr = Ptr.getOperand(0);
4566   return dyn_cast<ConstantPoolSDNode>(Ptr);
4567 }
4568 
4569 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
4570   ConstantPoolSDNode *CNode = getTargetConstantPoolFromBasePtr(Ptr);
4571   if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
4572     return nullptr;
4573   return CNode->getConstVal();
4574 }
4575 
4576 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
4577   if (!Load || !ISD::isNormalLoad(Load))
4578     return nullptr;
4579   return getTargetConstantFromBasePtr(Load->getBasePtr());
4580 }
4581 
4582 static const Constant *getTargetConstantFromNode(SDValue Op) {
4583   Op = peekThroughBitcasts(Op);
4584   return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
4585 }
4586 
4587 const Constant *
4588 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
4589   assert(LD && "Unexpected null LoadSDNode");
4590   return getTargetConstantFromNode(LD);
4591 }
4592 
4593 // Extract raw constant bits from constant pools, build vectors, broadcasts etc.
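     // For example, a v2i64 constant queried with EltSizeInBits = 8 is returned as
     // sixteen 8-bit elements; an element is reported as undef only if every bit
     // it covers comes from undef source bits (subject to the AllowWholeUndefs /
     // AllowPartialUndefs flags).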
4594 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
4595                                           APInt &UndefElts,
4596                                           SmallVectorImpl<APInt> &EltBits,
4597                                           bool AllowWholeUndefs = true,
4598                                           bool AllowPartialUndefs = true) {
4599   assert(EltBits.empty() && "Expected an empty EltBits vector");
4600 
4601   Op = peekThroughBitcasts(Op);
4602 
4603   EVT VT = Op.getValueType();
4604   unsigned SizeInBits = VT.getSizeInBits();
4605   assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
4606   unsigned NumElts = SizeInBits / EltSizeInBits;
4607 
4608   // Bitcast a source array of element bits to the target size.
4609   auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
4610     unsigned NumSrcElts = UndefSrcElts.getBitWidth();
4611     unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
4612     assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
4613            "Constant bit sizes don't match");
4614 
4615     // Don't split if we don't allow undef bits.
4616     bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
4617     if (UndefSrcElts.getBoolValue() && !AllowUndefs)
4618       return false;
4619 
4620     // If we're already the right size, don't bother bitcasting.
4621     if (NumSrcElts == NumElts) {
4622       UndefElts = UndefSrcElts;
4623       EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
4624       return true;
4625     }
4626 
4627     // Extract all the undef/constant element data and pack into single bitsets.
4628     APInt UndefBits(SizeInBits, 0);
4629     APInt MaskBits(SizeInBits, 0);
4630 
4631     for (unsigned i = 0; i != NumSrcElts; ++i) {
4632       unsigned BitOffset = i * SrcEltSizeInBits;
4633       if (UndefSrcElts[i])
4634         UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
4635       MaskBits.insertBits(SrcEltBits[i], BitOffset);
4636     }
4637 
4638     // Split the undef/constant single bitset data into the target elements.
4639     UndefElts = APInt(NumElts, 0);
4640     EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
4641 
4642     for (unsigned i = 0; i != NumElts; ++i) {
4643       unsigned BitOffset = i * EltSizeInBits;
4644       APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
4645 
4646       // Only treat an element as UNDEF if all bits are UNDEF.
4647       if (UndefEltBits.isAllOnes()) {
4648         if (!AllowWholeUndefs)
4649           return false;
4650         UndefElts.setBit(i);
4651         continue;
4652       }
4653 
4654       // If only some bits are UNDEF then treat them as zero (or bail if not
4655       // supported).
4656       if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
4657         return false;
4658 
4659       EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
4660     }
4661     return true;
4662   };
4663 
4664   // Collect constant bits and insert into mask/undef bit masks.
4665   auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
4666                                 unsigned UndefBitIndex) {
4667     if (!Cst)
4668       return false;
4669     if (isa<UndefValue>(Cst)) {
4670       Undefs.setBit(UndefBitIndex);
4671       return true;
4672     }
4673     if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4674       Mask = CInt->getValue();
4675       return true;
4676     }
4677     if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4678       Mask = CFP->getValueAPF().bitcastToAPInt();
4679       return true;
4680     }
4681     if (auto *CDS = dyn_cast<ConstantDataSequential>(Cst)) {
4682       Type *Ty = CDS->getType();
4683       Mask = APInt::getZero(Ty->getPrimitiveSizeInBits());
4684       Type *EltTy = CDS->getElementType();
4685       bool IsInteger = EltTy->isIntegerTy();
4686       bool IsFP =
4687           EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
4688       if (!IsInteger && !IsFP)
4689         return false;
4690       unsigned EltBits = EltTy->getPrimitiveSizeInBits();
4691       for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I)
4692         if (IsInteger)
4693           Mask.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
4694         else
4695           Mask.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
4696                           I * EltBits);
4697       return true;
4698     }
4699     return false;
4700   };
4701 
4702   // Handle UNDEFs.
4703   if (Op.isUndef()) {
4704     APInt UndefSrcElts = APInt::getAllOnes(NumElts);
4705     SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
4706     return CastBitData(UndefSrcElts, SrcEltBits);
4707   }
4708 
4709   // Extract scalar constant bits.
4710   if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
4711     APInt UndefSrcElts = APInt::getZero(1);
4712     SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
4713     return CastBitData(UndefSrcElts, SrcEltBits);
4714   }
4715   if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
4716     APInt UndefSrcElts = APInt::getZero(1);
4717     APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
4718     SmallVector<APInt, 64> SrcEltBits(1, RawBits);
4719     return CastBitData(UndefSrcElts, SrcEltBits);
4720   }
4721 
4722   // Extract constant bits from build vector.
4723   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
4724     BitVector Undefs;
4725     SmallVector<APInt> SrcEltBits;
4726     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4727     if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
4728       APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
4729       for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
4730         if (Undefs[I])
4731           UndefSrcElts.setBit(I);
4732       return CastBitData(UndefSrcElts, SrcEltBits);
4733     }
4734   }
4735 
4736   // Extract constant bits from constant pool vector.
4737   if (auto *Cst = getTargetConstantFromNode(Op)) {
4738     Type *CstTy = Cst->getType();
4739     unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4740     if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
4741       return false;
4742 
4743     unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
4744     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4745     if ((SizeInBits % SrcEltSizeInBits) != 0)
4746       return false;
4747 
4748     APInt UndefSrcElts(NumSrcElts, 0);
4749     SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
4750     for (unsigned i = 0; i != NumSrcElts; ++i)
4751       if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
4752                                UndefSrcElts, i))
4753         return false;
4754 
4755     return CastBitData(UndefSrcElts, SrcEltBits);
4756   }
4757 
4758   // Extract constant bits from a broadcasted constant pool scalar.
4759   if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
4760       EltSizeInBits <= VT.getScalarSizeInBits()) {
4761     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4762     if (MemIntr->getMemoryVT().getStoreSizeInBits() != VT.getScalarSizeInBits())
4763       return false;
4764 
4765     SDValue Ptr = MemIntr->getBasePtr();
4766     if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
4767       unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4768       unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4769 
4770       APInt UndefSrcElts(NumSrcElts, 0);
4771       SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
4772       if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
4773         if (UndefSrcElts[0])
4774           UndefSrcElts.setBits(0, NumSrcElts);
4775         if (SrcEltBits[0].getBitWidth() != SrcEltSizeInBits)
4776           SrcEltBits[0] = SrcEltBits[0].trunc(SrcEltSizeInBits);
4777         SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
4778         return CastBitData(UndefSrcElts, SrcEltBits);
4779       }
4780     }
4781   }
4782 
4783   // Extract constant bits from a subvector broadcast.
4784   if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
4785     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4786     SDValue Ptr = MemIntr->getBasePtr();
4787     // The source constant may be larger than the subvector broadcast, so
4788     // ensure we extract the correct subvector constants.
4789     if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
4790       Type *CstTy = Cst->getType();
4791       unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4792       unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
4793       if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
4794           (SizeInBits % SubVecSizeInBits) != 0)
4795         return false;
4796       unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
4797       unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
4798       unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
4799       APInt UndefSubElts(NumSubElts, 0);
4800       SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
4801                                         APInt(CstEltSizeInBits, 0));
4802       for (unsigned i = 0; i != NumSubElts; ++i) {
4803         if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
4804                                  UndefSubElts, i))
4805           return false;
4806         for (unsigned j = 1; j != NumSubVecs; ++j)
4807           SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
4808       }
4809       UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
4810                                      UndefSubElts);
4811       return CastBitData(UndefSubElts, SubEltBits);
4812     }
4813   }
4814 
4815   // Extract a rematerialized scalar constant insertion.
4816   if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
4817       Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
4818       isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
4819     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4820     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4821 
4822     APInt UndefSrcElts(NumSrcElts, 0);
4823     SmallVector<APInt, 64> SrcEltBits;
4824     auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
4825     SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
4826     SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
4827     return CastBitData(UndefSrcElts, SrcEltBits);
4828   }
4829 
4830   // Insert constant bits from base and subvector sources.
4831   if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
4832     // If we're bitcasting to larger elements we might lose track of undefs -
4833     // to be safe, don't allow any undefs in that case.
4834     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4835     bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
4836 
4837     APInt UndefSrcElts, UndefSubElts;
4838     SmallVector<APInt, 32> EltSrcBits, EltSubBits;
4839     if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
4840                                       UndefSubElts, EltSubBits,
4841                                       AllowWholeUndefs && AllowUndefs,
4842                                       AllowPartialUndefs && AllowUndefs) &&
4843         getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
4844                                       UndefSrcElts, EltSrcBits,
4845                                       AllowWholeUndefs && AllowUndefs,
4846                                       AllowPartialUndefs && AllowUndefs)) {
4847       unsigned BaseIdx = Op.getConstantOperandVal(2);
4848       UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
4849       for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
4850         EltSrcBits[BaseIdx + i] = EltSubBits[i];
4851       return CastBitData(UndefSrcElts, EltSrcBits);
4852     }
4853   }
4854 
4855   // Extract constant bits from a subvector's source.
4856   if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
4857     // TODO - support extract_subvector through bitcasts.
4858     if (EltSizeInBits != VT.getScalarSizeInBits())
4859       return false;
4860 
4861     if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4862                                       UndefElts, EltBits, AllowWholeUndefs,
4863                                       AllowPartialUndefs)) {
4864       EVT SrcVT = Op.getOperand(0).getValueType();
4865       unsigned NumSrcElts = SrcVT.getVectorNumElements();
4866       unsigned NumSubElts = VT.getVectorNumElements();
4867       unsigned BaseIdx = Op.getConstantOperandVal(1);
4868       UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
4869       if ((BaseIdx + NumSubElts) != NumSrcElts)
4870         EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
4871       if (BaseIdx != 0)
4872         EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
4873       return true;
4874     }
4875   }
4876 
4877   // Extract constant bits from shuffle node sources.
4878   if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
4879     // TODO - support shuffle through bitcasts.
4880     if (EltSizeInBits != VT.getScalarSizeInBits())
4881       return false;
4882 
4883     ArrayRef<int> Mask = SVN->getMask();
4884     if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
4885         llvm::any_of(Mask, [](int M) { return M < 0; }))
4886       return false;
4887 
4888     APInt UndefElts0, UndefElts1;
4889     SmallVector<APInt, 32> EltBits0, EltBits1;
4890     if (isAnyInRange(Mask, 0, NumElts) &&
4891         !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4892                                        UndefElts0, EltBits0, AllowWholeUndefs,
4893                                        AllowPartialUndefs))
4894       return false;
4895     if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
4896         !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
4897                                        UndefElts1, EltBits1, AllowWholeUndefs,
4898                                        AllowPartialUndefs))
4899       return false;
4900 
4901     UndefElts = APInt::getZero(NumElts);
4902     for (int i = 0; i != (int)NumElts; ++i) {
4903       int M = Mask[i];
4904       if (M < 0) {
4905         UndefElts.setBit(i);
4906         EltBits.push_back(APInt::getZero(EltSizeInBits));
4907       } else if (M < (int)NumElts) {
4908         if (UndefElts0[M])
4909           UndefElts.setBit(i);
4910         EltBits.push_back(EltBits0[M]);
4911       } else {
4912         if (UndefElts1[M - NumElts])
4913           UndefElts.setBit(i);
4914         EltBits.push_back(EltBits1[M - NumElts]);
4915       }
4916     }
4917     return true;
4918   }
4919 
4920   return false;
4921 }
4922 
4923 namespace llvm {
4924 namespace X86 {
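// For illustration (editorial note): a v4i32 constant vector <42, undef, 42, 42>
// is reported as a splat of 42, since whole-undef elements are skipped by the
// scan below; any mismatched defined elements reset SplatIndex and the
// function returns false.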
4925 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
4926   APInt UndefElts;
4927   SmallVector<APInt, 16> EltBits;
4928   if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
4929                                     UndefElts, EltBits, true,
4930                                     AllowPartialUndefs)) {
4931     int SplatIndex = -1;
4932     for (int i = 0, e = EltBits.size(); i != e; ++i) {
4933       if (UndefElts[i])
4934         continue;
4935       if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
4936         SplatIndex = -1;
4937         break;
4938       }
4939       SplatIndex = i;
4940     }
4941     if (0 <= SplatIndex) {
4942       SplatVal = EltBits[SplatIndex];
4943       return true;
4944     }
4945   }
4946 
4947   return false;
4948 }
4949 } // namespace X86
4950 } // namespace llvm
4951 
4952 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
4953                                         unsigned MaskEltSizeInBits,
4954                                         SmallVectorImpl<uint64_t> &RawMask,
4955                                         APInt &UndefElts) {
4956   // Extract the raw target constant bits.
4957   SmallVector<APInt, 64> EltBits;
4958   if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
4959                                      EltBits, /* AllowWholeUndefs */ true,
4960                                      /* AllowPartialUndefs */ false))
4961     return false;
4962 
4963   // Insert the extracted elements into the mask.
4964   for (const APInt &Elt : EltBits)
4965     RawMask.push_back(Elt.getZExtValue());
4966 
4967   return true;
4968 }
4969 
4970 // Match not(xor X, -1) -> X.
4971 // Match not(pcmpgt(C, X)) -> pcmpgt(X, C - 1).
4972 // Match not(extract_subvector(xor X, -1)) -> extract_subvector(X).
4973 // Match not(concat_vectors(xor X, -1, xor Y, -1)) -> concat_vectors(X, Y).
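// Note (editorial): the PCMPGT fold relies on not(C > X) == (X >= C) == (X > C - 1)
// for signed compares, which is why the code below bails out if any constant
// element is the minimum signed value (where C - 1 would wrap).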
4974 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
4975   V = peekThroughBitcasts(V);
4976   if (V.getOpcode() == ISD::XOR &&
4977       (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
4978        isAllOnesConstant(V.getOperand(1))))
4979     return V.getOperand(0);
4980   if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4981       (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
4982     if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
4983       Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
4984       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
4985                          Not, V.getOperand(1));
4986     }
4987   }
4988   if (V.getOpcode() == X86ISD::PCMPGT &&
4989       !ISD::isBuildVectorAllZeros(V.getOperand(0).getNode()) &&
4990       !ISD::isBuildVectorAllOnes(V.getOperand(0).getNode()) &&
4991       V.getOperand(0).hasOneUse()) {
4992     APInt UndefElts;
4993     SmallVector<APInt> EltBits;
4994     if (getTargetConstantBitsFromNode(V.getOperand(0),
4995                                       V.getScalarValueSizeInBits(), UndefElts,
4996                                       EltBits)) {
4997       // Don't fold min_signed_value -> (min_signed_value - 1)
4998       bool MinSigned = false;
4999       for (APInt &Elt : EltBits) {
5000         MinSigned |= Elt.isMinSignedValue();
5001         Elt -= 1;
5002       }
5003       if (!MinSigned) {
5004         SDLoc DL(V);
5005         MVT VT = V.getSimpleValueType();
5006         return DAG.getNode(X86ISD::PCMPGT, DL, VT, V.getOperand(1),
5007                            getConstVector(EltBits, UndefElts, VT, DAG, DL));
5008       }
5009     }
5010   }
5011   SmallVector<SDValue, 2> CatOps;
5012   if (collectConcatOps(V.getNode(), CatOps, DAG)) {
5013     for (SDValue &CatOp : CatOps) {
5014       SDValue NotCat = IsNOT(CatOp, DAG);
5015       if (!NotCat) return SDValue();
5016       CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
5017     }
5018     return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
5019   }
5020   return SDValue();
5021 }
5022 
5023 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
5024 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
5025 /// Note: This ignores saturation, so inputs must be checked first.
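/// For example (illustrative): for a 128-bit MVT::v16i8 result with
/// Unary == false and NumStages == 1, the mask produced below is
/// { 0,2,4,...,14, 16,18,...,30 }, i.e. the even (low) bytes of each 16-bit
/// element of the two concatenated inputs.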
5026 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
5027                                   bool Unary, unsigned NumStages = 1) {
5028   assert(Mask.empty() && "Expected an empty shuffle mask vector");
5029   unsigned NumElts = VT.getVectorNumElements();
5030   unsigned NumLanes = VT.getSizeInBits() / 128;
5031   unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
5032   unsigned Offset = Unary ? 0 : NumElts;
5033   unsigned Repetitions = 1u << (NumStages - 1);
5034   unsigned Increment = 1u << NumStages;
5035   assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
5036 
5037   for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
5038     for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
5039       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5040         Mask.push_back(Elt + (Lane * NumEltsPerLane));
5041       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5042         Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
5043     }
5044   }
5045 }
5046 
5047 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
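// For example (illustrative): for a 256-bit v32i8 PACK result, demanded result
// elements 0-7 map to LHS elements 0-7, 8-15 to RHS 0-7, 16-23 to LHS 8-15 and
// 24-31 to RHS 8-15, i.e. the split is performed per 128-bit lane.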
5048 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
5049                                 APInt &DemandedLHS, APInt &DemandedRHS) {
5050   int NumLanes = VT.getSizeInBits() / 128;
5051   int NumElts = DemandedElts.getBitWidth();
5052   int NumInnerElts = NumElts / 2;
5053   int NumEltsPerLane = NumElts / NumLanes;
5054   int NumInnerEltsPerLane = NumInnerElts / NumLanes;
5055 
5056   DemandedLHS = APInt::getZero(NumInnerElts);
5057   DemandedRHS = APInt::getZero(NumInnerElts);
5058 
5059   // Map DemandedElts to the packed operands.
5060   for (int Lane = 0; Lane != NumLanes; ++Lane) {
5061     for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
5062       int OuterIdx = (Lane * NumEltsPerLane) + Elt;
5063       int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
5064       if (DemandedElts[OuterIdx])
5065         DemandedLHS.setBit(InnerIdx);
5066       if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
5067         DemandedRHS.setBit(InnerIdx);
5068     }
5069   }
5070 }
5071 
5072 // Split the demanded elts of a HADD/HSUB node between its operands.
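// For example (illustrative): for a v4i32 HADD, demanded result element 0 maps
// to LHS elements 0-1, element 1 to LHS 2-3, element 2 to RHS 0-1 and
// element 3 to RHS 2-3.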
5073 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
5074                                  APInt &DemandedLHS, APInt &DemandedRHS) {
5075   int NumLanes = VT.getSizeInBits() / 128;
5076   int NumElts = DemandedElts.getBitWidth();
5077   int NumEltsPerLane = NumElts / NumLanes;
5078   int HalfEltsPerLane = NumEltsPerLane / 2;
5079 
5080   DemandedLHS = APInt::getZero(NumElts);
5081   DemandedRHS = APInt::getZero(NumElts);
5082 
5083   // Map DemandedElts to the horizontal operands.
5084   for (int Idx = 0; Idx != NumElts; ++Idx) {
5085     if (!DemandedElts[Idx])
5086       continue;
5087     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
5088     int LocalIdx = Idx % NumEltsPerLane;
5089     if (LocalIdx < HalfEltsPerLane) {
5090       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5091       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5092     } else {
5093       LocalIdx -= HalfEltsPerLane;
5094       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5095       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5096     }
5097   }
5098 }
5099 
5100 /// Calculates the shuffle mask corresponding to the target-specific opcode.
5101 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
5102 /// operands in \p Ops, and returns true.
5103 /// Sets \p IsUnary to true if only one source is used. Note that this will set
5104 /// IsUnary for shuffles which use a single input multiple times, and in those
5105 /// cases it will adjust the mask to only have indices within that single input.
5106 /// It is an error to call this with non-empty Mask/Ops vectors.
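/// For example (illustrative): a SHUFP node whose two operands are the same
/// value decodes to a two-input mask; the IsFakeUnary handling at the end of
/// this function then remaps indices >= NumElems back into that single input.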
5107 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5108                                  SmallVectorImpl<SDValue> &Ops,
5109                                  SmallVectorImpl<int> &Mask, bool &IsUnary) {
5110   unsigned NumElems = VT.getVectorNumElements();
5111   unsigned MaskEltSize = VT.getScalarSizeInBits();
5112   SmallVector<uint64_t, 32> RawMask;
5113   APInt RawUndefs;
5114   uint64_t ImmN;
5115 
5116   assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
5117   assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
5118 
5119   IsUnary = false;
5120   bool IsFakeUnary = false;
5121   switch (N->getOpcode()) {
5122   case X86ISD::BLENDI:
5123     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5124     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5125     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5126     DecodeBLENDMask(NumElems, ImmN, Mask);
5127     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5128     break;
5129   case X86ISD::SHUFP:
5130     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5131     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5132     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5133     DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
5134     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5135     break;
5136   case X86ISD::INSERTPS:
5137     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5138     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5139     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5140     DecodeINSERTPSMask(ImmN, Mask);
5141     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5142     break;
5143   case X86ISD::EXTRQI:
5144     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5145     if (isa<ConstantSDNode>(N->getOperand(1)) &&
5146         isa<ConstantSDNode>(N->getOperand(2))) {
5147       int BitLen = N->getConstantOperandVal(1);
5148       int BitIdx = N->getConstantOperandVal(2);
5149       DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5150       IsUnary = true;
5151     }
5152     break;
5153   case X86ISD::INSERTQI:
5154     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5155     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5156     if (isa<ConstantSDNode>(N->getOperand(2)) &&
5157         isa<ConstantSDNode>(N->getOperand(3))) {
5158       int BitLen = N->getConstantOperandVal(2);
5159       int BitIdx = N->getConstantOperandVal(3);
5160       DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5161       IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5162     }
5163     break;
5164   case X86ISD::UNPCKH:
5165     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5166     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5167     DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
5168     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5169     break;
5170   case X86ISD::UNPCKL:
5171     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5172     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5173     DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
5174     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5175     break;
5176   case X86ISD::MOVHLPS:
5177     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5178     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5179     DecodeMOVHLPSMask(NumElems, Mask);
5180     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5181     break;
5182   case X86ISD::MOVLHPS:
5183     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5184     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5185     DecodeMOVLHPSMask(NumElems, Mask);
5186     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5187     break;
5188   case X86ISD::VALIGN:
5189     assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
5190            "Only 32-bit and 64-bit elements are supported!");
5191     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5192     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5193     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5194     DecodeVALIGNMask(NumElems, ImmN, Mask);
5195     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5196     Ops.push_back(N->getOperand(1));
5197     Ops.push_back(N->getOperand(0));
5198     break;
5199   case X86ISD::PALIGNR:
5200     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5201     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5202     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5203     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5204     DecodePALIGNRMask(NumElems, ImmN, Mask);
5205     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5206     Ops.push_back(N->getOperand(1));
5207     Ops.push_back(N->getOperand(0));
5208     break;
5209   case X86ISD::VSHLDQ:
5210     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5211     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5212     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5213     DecodePSLLDQMask(NumElems, ImmN, Mask);
5214     IsUnary = true;
5215     break;
5216   case X86ISD::VSRLDQ:
5217     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5218     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5219     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5220     DecodePSRLDQMask(NumElems, ImmN, Mask);
5221     IsUnary = true;
5222     break;
5223   case X86ISD::PSHUFD:
5224   case X86ISD::VPERMILPI:
5225     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5226     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5227     DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
5228     IsUnary = true;
5229     break;
5230   case X86ISD::PSHUFHW:
5231     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5232     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5233     DecodePSHUFHWMask(NumElems, ImmN, Mask);
5234     IsUnary = true;
5235     break;
5236   case X86ISD::PSHUFLW:
5237     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5238     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5239     DecodePSHUFLWMask(NumElems, ImmN, Mask);
5240     IsUnary = true;
5241     break;
5242   case X86ISD::VZEXT_MOVL:
5243     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5244     DecodeZeroMoveLowMask(NumElems, Mask);
5245     IsUnary = true;
5246     break;
5247   case X86ISD::VBROADCAST:
5248     // We only decode broadcasts of same-sized vectors, as peeking through to
5249     // extracted subvectors is likely to cause hasOneUse issues with
5250     // SimplifyDemandedBits etc.
5251     if (N->getOperand(0).getValueType() == VT) {
5252       DecodeVectorBroadcast(NumElems, Mask);
5253       IsUnary = true;
5254       break;
5255     }
5256     return false;
5257   case X86ISD::VPERMILPV: {
5258     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5259     IsUnary = true;
5260     SDValue MaskNode = N->getOperand(1);
5261     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5262                                     RawUndefs)) {
5263       DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
5264       break;
5265     }
5266     return false;
5267   }
5268   case X86ISD::PSHUFB: {
5269     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5270     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5271     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5272     IsUnary = true;
5273     SDValue MaskNode = N->getOperand(1);
5274     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5275       DecodePSHUFBMask(RawMask, RawUndefs, Mask);
5276       break;
5277     }
5278     return false;
5279   }
5280   case X86ISD::VPERMI:
5281     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5282     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5283     DecodeVPERMMask(NumElems, ImmN, Mask);
5284     IsUnary = true;
5285     break;
5286   case X86ISD::MOVSS:
5287   case X86ISD::MOVSD:
5288   case X86ISD::MOVSH:
5289     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5290     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5291     DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
5292     break;
5293   case X86ISD::VPERM2X128:
5294     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5295     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5296     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5297     DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
5298     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5299     break;
5300   case X86ISD::SHUF128:
5301     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5302     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5303     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5304     decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
5305     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5306     break;
5307   case X86ISD::MOVSLDUP:
5308     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5309     DecodeMOVSLDUPMask(NumElems, Mask);
5310     IsUnary = true;
5311     break;
5312   case X86ISD::MOVSHDUP:
5313     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5314     DecodeMOVSHDUPMask(NumElems, Mask);
5315     IsUnary = true;
5316     break;
5317   case X86ISD::MOVDDUP:
5318     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5319     DecodeMOVDDUPMask(NumElems, Mask);
5320     IsUnary = true;
5321     break;
5322   case X86ISD::VPERMIL2: {
5323     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5324     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5325     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5326     SDValue MaskNode = N->getOperand(2);
5327     SDValue CtrlNode = N->getOperand(3);
5328     if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
5329       unsigned CtrlImm = CtrlOp->getZExtValue();
5330       if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5331                                       RawUndefs)) {
5332         DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
5333                             Mask);
5334         break;
5335       }
5336     }
5337     return false;
5338   }
5339   case X86ISD::VPPERM: {
5340     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5341     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5342     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5343     SDValue MaskNode = N->getOperand(2);
5344     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5345       DecodeVPPERMMask(RawMask, RawUndefs, Mask);
5346       break;
5347     }
5348     return false;
5349   }
5350   case X86ISD::VPERMV: {
5351     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5352     IsUnary = true;
5353     // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
5354     Ops.push_back(N->getOperand(1));
5355     SDValue MaskNode = N->getOperand(0);
5356     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5357                                     RawUndefs)) {
5358       DecodeVPERMVMask(RawMask, RawUndefs, Mask);
5359       break;
5360     }
5361     return false;
5362   }
5363   case X86ISD::VPERMV3: {
5364     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5365     assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
5366     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
5367     // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
5368     Ops.push_back(N->getOperand(0));
5369     Ops.push_back(N->getOperand(2));
5370     SDValue MaskNode = N->getOperand(1);
5371     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5372                                     RawUndefs)) {
5373       DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
5374       break;
5375     }
5376     return false;
5377   }
5378   default: llvm_unreachable("unknown target shuffle node");
5379   }
5380 
5381   // Empty mask indicates the decode failed.
5382   if (Mask.empty())
5383     return false;
5384 
5385   // Check if we're getting a shuffle mask with zeroed elements.
5386   if (!AllowSentinelZero && isAnyZero(Mask))
5387     return false;
5388 
5389   // If we have a fake unary shuffle, the shuffle mask is spread across two
5390   // inputs that are actually the same node. Re-map the mask to always point
5391   // into the first input.
5392   if (IsFakeUnary)
5393     for (int &M : Mask)
5394       if (M >= (int)Mask.size())
5395         M -= Mask.size();
5396 
5397   // If we didn't already add operands in the opcode-specific code, default to
5398   // adding 1 or 2 operands starting at 0.
5399   if (Ops.empty()) {
5400     Ops.push_back(N->getOperand(0));
5401     if (!IsUnary || IsFakeUnary)
5402       Ops.push_back(N->getOperand(1));
5403   }
5404 
5405   return true;
5406 }
5407 
5408 // Wrapper for getTargetShuffleMask that discards the IsUnary result.
5409 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5410                                  SmallVectorImpl<SDValue> &Ops,
5411                                  SmallVectorImpl<int> &Mask) {
5412   bool IsUnary;
5413   return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
5414 }
5415 
5416 /// Compute whether each element of a shuffle is zeroable.
5417 ///
5418 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
5419 /// Either it is an undef element in the shuffle mask, the element of the input
5420 /// referenced is undef, or the element of the input referenced is known to be
5421 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
5422 /// as many lanes with this technique as possible to simplify the remaining
5423 /// shuffle.
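/// For example (illustrative): with Mask = {0, -1, 4, 5} and V2 an all-zeros
/// build vector, element 1 is reported in KnownUndef and elements 2 and 3 in
/// KnownZero.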
5424 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
5425                                            SDValue V1, SDValue V2,
5426                                            APInt &KnownUndef, APInt &KnownZero) {
5427   int Size = Mask.size();
5428   KnownUndef = KnownZero = APInt::getZero(Size);
5429 
5430   V1 = peekThroughBitcasts(V1);
5431   V2 = peekThroughBitcasts(V2);
5432 
5433   bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
5434   bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
5435 
5436   int VectorSizeInBits = V1.getValueSizeInBits();
5437   int ScalarSizeInBits = VectorSizeInBits / Size;
5438   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
5439 
5440   for (int i = 0; i < Size; ++i) {
5441     int M = Mask[i];
5442     // Handle the easy cases.
5443     if (M < 0) {
5444       KnownUndef.setBit(i);
5445       continue;
5446     }
5447     if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
5448       KnownZero.setBit(i);
5449       continue;
5450     }
5451 
5452     // Determine shuffle input and normalize the mask.
5453     SDValue V = M < Size ? V1 : V2;
5454     M %= Size;
5455 
5456     // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
5457     if (V.getOpcode() != ISD::BUILD_VECTOR)
5458       continue;
5459 
5460     // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
5461     // the (larger) source element must be UNDEF/ZERO.
5462     if ((Size % V.getNumOperands()) == 0) {
5463       int Scale = Size / V->getNumOperands();
5464       SDValue Op = V.getOperand(M / Scale);
5465       if (Op.isUndef())
5466         KnownUndef.setBit(i);
5467       if (X86::isZeroNode(Op))
5468         KnownZero.setBit(i);
5469       else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
5470         APInt Val = Cst->getAPIntValue();
5471         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5472         if (Val == 0)
5473           KnownZero.setBit(i);
5474       } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
5475         APInt Val = Cst->getValueAPF().bitcastToAPInt();
5476         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5477         if (Val == 0)
5478           KnownZero.setBit(i);
5479       }
5480       continue;
5481     }
5482 
5483     // If the BUILD_VECTOR has more elements, then all the (smaller) source
5484     // elements must be UNDEF or ZERO.
5485     if ((V.getNumOperands() % Size) == 0) {
5486       int Scale = V->getNumOperands() / Size;
5487       bool AllUndef = true;
5488       bool AllZero = true;
5489       for (int j = 0; j < Scale; ++j) {
5490         SDValue Op = V.getOperand((M * Scale) + j);
5491         AllUndef &= Op.isUndef();
5492         AllZero &= X86::isZeroNode(Op);
5493       }
5494       if (AllUndef)
5495         KnownUndef.setBit(i);
5496       if (AllZero)
5497         KnownZero.setBit(i);
5498       continue;
5499     }
5500   }
5501 }
5502 
5503 /// Decode a target shuffle mask and inputs and see if any values are
5504 /// known to be undef or zero from their inputs.
5505 /// Returns true if the target shuffle mask was decoded.
5506 /// FIXME: Merge this with computeZeroableShuffleElements?
5507 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
5508                                          SmallVectorImpl<SDValue> &Ops,
5509                                          APInt &KnownUndef, APInt &KnownZero) {
5510   bool IsUnary;
5511   if (!isTargetShuffle(N.getOpcode()))
5512     return false;
5513 
5514   MVT VT = N.getSimpleValueType();
5515   if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
5516     return false;
5517 
5518   int Size = Mask.size();
5519   SDValue V1 = Ops[0];
5520   SDValue V2 = IsUnary ? V1 : Ops[1];
5521   KnownUndef = KnownZero = APInt::getZero(Size);
5522 
5523   V1 = peekThroughBitcasts(V1);
5524   V2 = peekThroughBitcasts(V2);
5525 
5526   assert((VT.getSizeInBits() % Size) == 0 &&
5527          "Illegal split of shuffle value type");
5528   unsigned EltSizeInBits = VT.getSizeInBits() / Size;
5529 
5530   // Extract known constant input data.
5531   APInt UndefSrcElts[2];
5532   SmallVector<APInt, 32> SrcEltBits[2];
5533   bool IsSrcConstant[2] = {
5534       getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
5535                                     SrcEltBits[0], true, false),
5536       getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
5537                                     SrcEltBits[1], true, false)};
5538 
5539   for (int i = 0; i < Size; ++i) {
5540     int M = Mask[i];
5541 
5542     // Already decoded as SM_SentinelZero / SM_SentinelUndef.
5543     if (M < 0) {
5544       assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
5545       if (SM_SentinelUndef == M)
5546         KnownUndef.setBit(i);
5547       if (SM_SentinelZero == M)
5548         KnownZero.setBit(i);
5549       continue;
5550     }
5551 
5552     // Determine shuffle input and normalize the mask.
5553     unsigned SrcIdx = M / Size;
5554     SDValue V = M < Size ? V1 : V2;
5555     M %= Size;
5556 
5557     // We are referencing an UNDEF input.
5558     if (V.isUndef()) {
5559       KnownUndef.setBit(i);
5560       continue;
5561     }
5562 
5563     // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
5564     // TODO: We currently only set UNDEF for integer types - floats use the same
5565     // registers as vectors and many of the scalar folded loads rely on the
5566     // SCALAR_TO_VECTOR pattern.
5567     if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5568         (Size % V.getValueType().getVectorNumElements()) == 0) {
5569       int Scale = Size / V.getValueType().getVectorNumElements();
5570       int Idx = M / Scale;
5571       if (Idx != 0 && !VT.isFloatingPoint())
5572         KnownUndef.setBit(i);
5573       else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
5574         KnownZero.setBit(i);
5575       continue;
5576     }
5577 
5578     // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
5579     // base vectors.
5580     if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
5581       SDValue Vec = V.getOperand(0);
5582       int NumVecElts = Vec.getValueType().getVectorNumElements();
5583       if (Vec.isUndef() && Size == NumVecElts) {
5584         int Idx = V.getConstantOperandVal(2);
5585         int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
5586         if (M < Idx || (Idx + NumSubElts) <= M)
5587           KnownUndef.setBit(i);
5588       }
5589       continue;
5590     }
5591 
5592     // Attempt to extract from the source's constant bits.
5593     if (IsSrcConstant[SrcIdx]) {
5594       if (UndefSrcElts[SrcIdx][M])
5595         KnownUndef.setBit(i);
5596       else if (SrcEltBits[SrcIdx][M] == 0)
5597         KnownZero.setBit(i);
5598     }
5599   }
5600 
5601   assert(VT.getVectorNumElements() == (unsigned)Size &&
5602          "Different mask size from vector size!");
5603   return true;
5604 }
5605 
5606 // Replace target shuffle mask elements with known undef/zero sentinels.
5607 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
5608                                               const APInt &KnownUndef,
5609                                               const APInt &KnownZero,
5610                                               bool ResolveKnownZeros = true) {
5611   unsigned NumElts = Mask.size();
5612   assert(KnownUndef.getBitWidth() == NumElts &&
5613          KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
5614 
5615   for (unsigned i = 0; i != NumElts; ++i) {
5616     if (KnownUndef[i])
5617       Mask[i] = SM_SentinelUndef;
5618     else if (ResolveKnownZeros && KnownZero[i])
5619       Mask[i] = SM_SentinelZero;
5620   }
5621 }
5622 
5623 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
5624 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
5625                                               APInt &KnownUndef,
5626                                               APInt &KnownZero) {
5627   unsigned NumElts = Mask.size();
5628   KnownUndef = KnownZero = APInt::getZero(NumElts);
5629 
5630   for (unsigned i = 0; i != NumElts; ++i) {
5631     int M = Mask[i];
5632     if (SM_SentinelUndef == M)
5633       KnownUndef.setBit(i);
5634     if (SM_SentinelZero == M)
5635       KnownZero.setBit(i);
5636   }
5637 }
5638 
5639 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
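// For example (illustrative): a v4i32 VSELECT condition of <-1, 0, -1, 0>
// yields the blend mask {0, 5, 2, 7}, selecting from the first operand where
// the condition element is non-zero and from the second operand otherwise.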
5640 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
5641                                          SDValue Cond, bool IsBLENDV = false) {
5642   EVT CondVT = Cond.getValueType();
5643   unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
5644   unsigned NumElts = CondVT.getVectorNumElements();
5645 
5646   APInt UndefElts;
5647   SmallVector<APInt, 32> EltBits;
5648   if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
5649                                      true, false))
5650     return false;
5651 
5652   Mask.resize(NumElts, SM_SentinelUndef);
5653 
5654   for (int i = 0; i != (int)NumElts; ++i) {
5655     Mask[i] = i;
5656     // Arbitrarily choose from the 2nd operand if the select condition element
5657     // is undef.
5658     // TODO: Can we do better by matching patterns such as even/odd?
5659     if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
5660         (IsBLENDV && EltBits[i].isNonNegative()))
5661       Mask[i] += NumElts;
5662   }
5663 
5664   return true;
5665 }
5666 
5667 // Forward declaration (for getFauxShuffleMask recursive check).
5668 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
5669                                    SmallVectorImpl<SDValue> &Inputs,
5670                                    SmallVectorImpl<int> &Mask,
5671                                    const SelectionDAG &DAG, unsigned Depth,
5672                                    bool ResolveKnownElts);
5673 
5674 // Attempt to decode ops that could be represented as a shuffle mask.
5675 // The decoded shuffle mask may contain a different number of elements than the
5676 // destination value type.
5677 // TODO: Merge into getTargetShuffleInputs()
5678 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
5679                                SmallVectorImpl<int> &Mask,
5680                                SmallVectorImpl<SDValue> &Ops,
5681                                const SelectionDAG &DAG, unsigned Depth,
5682                                bool ResolveKnownElts) {
5683   Mask.clear();
5684   Ops.clear();
5685 
5686   MVT VT = N.getSimpleValueType();
5687   unsigned NumElts = VT.getVectorNumElements();
5688   unsigned NumSizeInBits = VT.getSizeInBits();
5689   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
5690   if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
5691     return false;
5692   assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
5693   unsigned NumSizeInBytes = NumSizeInBits / 8;
5694   unsigned NumBytesPerElt = NumBitsPerElt / 8;
5695 
5696   unsigned Opcode = N.getOpcode();
5697   switch (Opcode) {
5698   case ISD::VECTOR_SHUFFLE: {
5699     // ISD::VECTOR_SHUFFLE isn't treated as a target shuffle, so decode it here.
5700     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
5701     if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
5702       Mask.append(ShuffleMask.begin(), ShuffleMask.end());
5703       Ops.push_back(N.getOperand(0));
5704       Ops.push_back(N.getOperand(1));
5705       return true;
5706     }
5707     return false;
5708   }
5709   case ISD::AND:
5710   case X86ISD::ANDNP: {
5711     // Attempt to decode as a per-byte mask.
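    // e.g. (illustrative) AND'ing a v16i8 value with a constant
    // <-1, 0, -1, 0, ...> is decoded below as the byte shuffle <0, Z, 2, Z, ...>
    // of the non-constant operand, where Z denotes SM_SentinelZero.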
5712     APInt UndefElts;
5713     SmallVector<APInt, 32> EltBits;
5714     SDValue N0 = N.getOperand(0);
5715     SDValue N1 = N.getOperand(1);
5716     bool IsAndN = (X86ISD::ANDNP == Opcode);
5717     uint64_t ZeroMask = IsAndN ? 255 : 0;
5718     if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
5719       return false;
5720     // We can't assume an undef src element gives an undef dst - the other src
5721     // might be zero.
5722     if (!UndefElts.isZero())
5723       return false;
5724     for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
5725       const APInt &ByteBits = EltBits[i];
5726       if (ByteBits != 0 && ByteBits != 255)
5727         return false;
5728       Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
5729     }
5730     Ops.push_back(IsAndN ? N1 : N0);
5731     return true;
5732   }
5733   case ISD::OR: {
5734     // Handle the OR(SHUFFLE,SHUFFLE) case where, for each element, one source
5735     // is zero and the other is a valid shuffle index.
5736     SDValue N0 = peekThroughBitcasts(N.getOperand(0));
5737     SDValue N1 = peekThroughBitcasts(N.getOperand(1));
5738     if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
5739       return false;
5740 
5741     SmallVector<int, 64> SrcMask0, SrcMask1;
5742     SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
5743     APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
5744     APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
5745     if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
5746                                 Depth + 1, true) ||
5747         !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
5748                                 Depth + 1, true))
5749       return false;
5750 
5751     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
5752     SmallVector<int, 64> Mask0, Mask1;
5753     narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
5754     narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
5755     for (int i = 0; i != (int)MaskSize; ++i) {
5756       // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
5757       // loops converting between OR and BLEND shuffles due to
5758       // canWidenShuffleElements merging away undef elements, meaning we
5759       // fail to recognise the OR as the undef element isn't known zero.
5760       if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
5761         Mask.push_back(SM_SentinelZero);
5762       else if (Mask1[i] == SM_SentinelZero)
5763         Mask.push_back(i);
5764       else if (Mask0[i] == SM_SentinelZero)
5765         Mask.push_back(i + MaskSize);
5766       else
5767         return false;
5768     }
5769     Ops.push_back(N0);
5770     Ops.push_back(N1);
5771     return true;
5772   }
5773   case ISD::INSERT_SUBVECTOR: {
5774     SDValue Src = N.getOperand(0);
5775     SDValue Sub = N.getOperand(1);
5776     EVT SubVT = Sub.getValueType();
5777     unsigned NumSubElts = SubVT.getVectorNumElements();
5778     if (!N->isOnlyUserOf(Sub.getNode()))
5779       return false;
5780     SDValue SubBC = peekThroughBitcasts(Sub);
5781     uint64_t InsertIdx = N.getConstantOperandVal(2);
5782     // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
5783     if (SubBC.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5784         SubBC.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5785       uint64_t ExtractIdx = SubBC.getConstantOperandVal(1);
5786       SDValue SubBCSrc = SubBC.getOperand(0);
5787       unsigned NumSubSrcBCElts = SubBCSrc.getValueType().getVectorNumElements();
5788       unsigned MaxElts = std::max(NumElts, NumSubSrcBCElts);
5789       assert((MaxElts % NumElts) == 0 && (MaxElts % NumSubSrcBCElts) == 0 &&
5790              "Subvector valuetype mismatch");
5791       InsertIdx *= (MaxElts / NumElts);
5792       ExtractIdx *= (MaxElts / NumSubSrcBCElts);
5793       NumSubElts *= (MaxElts / NumElts);
5794       bool SrcIsUndef = Src.isUndef();
5795       for (int i = 0; i != (int)MaxElts; ++i)
5796         Mask.push_back(SrcIsUndef ? SM_SentinelUndef : i);
5797       for (int i = 0; i != (int)NumSubElts; ++i)
5798         Mask[InsertIdx + i] = (SrcIsUndef ? 0 : MaxElts) + ExtractIdx + i;
5799       if (!SrcIsUndef)
5800         Ops.push_back(Src);
5801       Ops.push_back(SubBCSrc);
5802       return true;
5803     }
5804     // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
5805     SmallVector<int, 64> SubMask;
5806     SmallVector<SDValue, 2> SubInputs;
5807     SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
5808     EVT SubSrcVT = SubSrc.getValueType();
5809     if (!SubSrcVT.isVector())
5810       return false;
5811 
5812     APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
5813     if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
5814                                 Depth + 1, ResolveKnownElts))
5815       return false;
5816 
5817     // Subvector shuffle inputs must not be larger than the subvector.
5818     if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
5819           return SubVT.getFixedSizeInBits() <
5820                  SubInput.getValueSizeInBits().getFixedValue();
5821         }))
5822       return false;
5823 
5824     if (SubMask.size() != NumSubElts) {
5825       assert(((SubMask.size() % NumSubElts) == 0 ||
5826               (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
5827       if ((NumSubElts % SubMask.size()) == 0) {
5828         int Scale = NumSubElts / SubMask.size();
5829         SmallVector<int, 64> ScaledSubMask;
5830         narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
5831         SubMask = ScaledSubMask;
5832       } else {
5833         int Scale = SubMask.size() / NumSubElts;
5834         NumSubElts = SubMask.size();
5835         NumElts *= Scale;
5836         InsertIdx *= Scale;
5837       }
5838     }
5839     Ops.push_back(Src);
5840     Ops.append(SubInputs.begin(), SubInputs.end());
5841     if (ISD::isBuildVectorAllZeros(Src.getNode()))
5842       Mask.append(NumElts, SM_SentinelZero);
5843     else
5844       for (int i = 0; i != (int)NumElts; ++i)
5845         Mask.push_back(i);
5846     for (int i = 0; i != (int)NumSubElts; ++i) {
5847       int M = SubMask[i];
5848       if (0 <= M) {
5849         int InputIdx = M / NumSubElts;
5850         M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
5851       }
5852       Mask[i + InsertIdx] = M;
5853     }
5854     return true;
5855   }
5856   case X86ISD::PINSRB:
5857   case X86ISD::PINSRW:
5858   case ISD::SCALAR_TO_VECTOR:
5859   case ISD::INSERT_VECTOR_ELT: {
5860     // Match against an insert_vector_elt/scalar_to_vector of an extract from a
5861     // vector, for matching src/dst vector types.
5862     SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
5863 
5864     unsigned DstIdx = 0;
5865     if (Opcode != ISD::SCALAR_TO_VECTOR) {
5866       // Check we have an in-range constant insertion index.
5867       if (!isa<ConstantSDNode>(N.getOperand(2)) ||
5868           N.getConstantOperandAPInt(2).uge(NumElts))
5869         return false;
5870       DstIdx = N.getConstantOperandVal(2);
5871 
5872       // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
5873       if (X86::isZeroNode(Scl)) {
5874         Ops.push_back(N.getOperand(0));
5875         for (unsigned i = 0; i != NumElts; ++i)
5876           Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
5877         return true;
5878       }
5879     }
5880 
5881     // Peek through trunc/aext/zext.
5882     // TODO: aext shouldn't require SM_SentinelZero padding.
5883     // TODO: handle shift of scalars.
5884     unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
5885     while (Scl.getOpcode() == ISD::TRUNCATE ||
5886            Scl.getOpcode() == ISD::ANY_EXTEND ||
5887            Scl.getOpcode() == ISD::ZERO_EXTEND) {
5888       Scl = Scl.getOperand(0);
5889       MinBitsPerElt =
5890           std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
5891     }
5892     if ((MinBitsPerElt % 8) != 0)
5893       return false;
5894 
5895     // Attempt to find the source vector the scalar was extracted from.
5896     SDValue SrcExtract;
5897     if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
5898          Scl.getOpcode() == X86ISD::PEXTRW ||
5899          Scl.getOpcode() == X86ISD::PEXTRB) &&
5900         Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5901       SrcExtract = Scl;
5902     }
5903     if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
5904       return false;
5905 
5906     SDValue SrcVec = SrcExtract.getOperand(0);
5907     EVT SrcVT = SrcVec.getValueType();
5908     if (!SrcVT.getScalarType().isByteSized())
5909       return false;
5910     unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
5911     unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
5912     unsigned DstByte = DstIdx * NumBytesPerElt;
5913     MinBitsPerElt =
5914         std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
5915 
5916     // Create 'identity' byte level shuffle mask and then add inserted bytes.
5917     if (Opcode == ISD::SCALAR_TO_VECTOR) {
5918       Ops.push_back(SrcVec);
5919       Mask.append(NumSizeInBytes, SM_SentinelUndef);
5920     } else {
5921       Ops.push_back(SrcVec);
5922       Ops.push_back(N.getOperand(0));
5923       for (int i = 0; i != (int)NumSizeInBytes; ++i)
5924         Mask.push_back(NumSizeInBytes + i);
5925     }
5926 
5927     unsigned MinBytesPerElts = MinBitsPerElt / 8;
5928     MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
5929     for (unsigned i = 0; i != MinBytesPerElts; ++i)
5930       Mask[DstByte + i] = SrcByte + i;
5931     for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
5932       Mask[DstByte + i] = SM_SentinelZero;
5933     return true;
5934   }
5935   case X86ISD::PACKSS:
5936   case X86ISD::PACKUS: {
5937     SDValue N0 = N.getOperand(0);
5938     SDValue N1 = N.getOperand(1);
5939     assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
5940            N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
5941            "Unexpected input value type");
5942 
5943     APInt EltsLHS, EltsRHS;
5944     getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
5945 
5946     // If we know input saturation won't happen (or we don't care about
5947     // particular lanes), we can treat this as a truncation shuffle.
5948     bool Offset0 = false, Offset1 = false;
5949     if (Opcode == X86ISD::PACKSS) {
5950       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5951            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
5952           (!(N1.isUndef() || EltsRHS.isZero()) &&
5953            DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
5954         return false;
5955       // We can't easily fold ASHR into a shuffle, but if it was feeding a
5956       // PACKSS then it was likely being used for sign-extension for a
5957       // truncation, so just peek through and adjust the mask accordingly.
5958       if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
5959           N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
5960         Offset0 = true;
5961         N0 = N0.getOperand(0);
5962       }
5963       if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
5964           N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
5965         Offset1 = true;
5966         N1 = N1.getOperand(0);
5967       }
5968     } else {
5969       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
5970       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5971            !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
5972           (!(N1.isUndef() || EltsRHS.isZero()) &&
5973            !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
5974         return false;
5975     }
5976 
5977     bool IsUnary = (N0 == N1);
5978 
5979     Ops.push_back(N0);
5980     if (!IsUnary)
5981       Ops.push_back(N1);
5982 
5983     createPackShuffleMask(VT, Mask, IsUnary);
5984 
5985     if (Offset0 || Offset1) {
5986       for (int &M : Mask)
5987         if ((Offset0 && isInRange(M, 0, NumElts)) ||
5988             (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
5989           ++M;
5990     }
5991     return true;
5992   }
5993   case ISD::VSELECT:
5994   case X86ISD::BLENDV: {
5995     SDValue Cond = N.getOperand(0);
5996     if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
5997       Ops.push_back(N.getOperand(1));
5998       Ops.push_back(N.getOperand(2));
5999       return true;
6000     }
6001     return false;
6002   }
6003   case X86ISD::VTRUNC: {
6004     SDValue Src = N.getOperand(0);
6005     EVT SrcVT = Src.getValueType();
6006     // Truncated source must be a simple vector.
6007     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6008         (SrcVT.getScalarSizeInBits() % 8) != 0)
6009       return false;
6010     unsigned NumSrcElts = SrcVT.getVectorNumElements();
6011     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6012     unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
6013     assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
6014     for (unsigned i = 0; i != NumSrcElts; ++i)
6015       Mask.push_back(i * Scale);
6016     Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
6017     Ops.push_back(Src);
6018     return true;
6019   }
6020   case X86ISD::VSHLI:
6021   case X86ISD::VSRLI: {
6022     uint64_t ShiftVal = N.getConstantOperandVal(1);
6023     // Out of range bit shifts are guaranteed to be zero.
6024     if (NumBitsPerElt <= ShiftVal) {
6025       Mask.append(NumElts, SM_SentinelZero);
6026       return true;
6027     }
6028 
6029     // We can only decode 'whole byte' bit shifts as shuffles.
6030     if ((ShiftVal % 8) != 0)
6031       break;
6032 
6033     uint64_t ByteShift = ShiftVal / 8;
6034     Ops.push_back(N.getOperand(0));
6035 
6036     // Clear mask to all zeros and insert the shifted byte indices.
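    // Illustrative example (little-endian byte order within each lane): a v8i16
    // VSRLI by 8 has ByteShift == 1, so each element's byte mask becomes
    // {1, SM_SentinelZero}: the high byte moves down and the vacated byte is zero.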
6037     Mask.append(NumSizeInBytes, SM_SentinelZero);
6038 
6039     if (X86ISD::VSHLI == Opcode) {
6040       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6041         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6042           Mask[i + j] = i + j - ByteShift;
6043     } else {
6044       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6045         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6046           Mask[i + j - ByteShift] = i + j;
6047     }
6048     return true;
6049   }
6050   case X86ISD::VROTLI:
6051   case X86ISD::VROTRI: {
6052     // We can only decode 'whole byte' bit rotates as shuffles.
6053     uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
6054     if ((RotateVal % 8) != 0)
6055       return false;
6056     Ops.push_back(N.getOperand(0));
6057     int Offset = RotateVal / 8;
6058     Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
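    // Illustrative example: a v4i32 VROTLI by 8 gives Offset = 4 - 1 = 3, so each
    // element's byte mask is {3, 0, 1, 2}; the top byte wraps around to the
    // bottom, matching a left-rotate by one byte.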
6059     for (int i = 0; i != (int)NumElts; ++i) {
6060       int BaseIdx = i * NumBytesPerElt;
6061       for (int j = 0; j != (int)NumBytesPerElt; ++j) {
6062         Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
6063       }
6064     }
6065     return true;
6066   }
6067   case X86ISD::VBROADCAST: {
6068     SDValue Src = N.getOperand(0);
6069     if (!Src.getSimpleValueType().isVector()) {
6070       if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6071           !isNullConstant(Src.getOperand(1)) ||
6072           Src.getOperand(0).getValueType().getScalarType() !=
6073               VT.getScalarType())
6074         return false;
6075       Src = Src.getOperand(0);
6076     }
6077     Ops.push_back(Src);
6078     Mask.append(NumElts, 0);
6079     return true;
6080   }
6081   case ISD::SIGN_EXTEND_VECTOR_INREG: {
6082     SDValue Src = N.getOperand(0);
6083     EVT SrcVT = Src.getValueType();
6084     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6085 
6086     // Extended source must be a simple vector.
6087     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6088         (NumBitsPerSrcElt % 8) != 0)
6089       return false;
6090 
6091     // We can only handle all-signbits extensions.
6092     APInt DemandedSrcElts =
6093         DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
6094     if (DAG.ComputeNumSignBits(Src, DemandedSrcElts) != NumBitsPerSrcElt)
6095       return false;
6096 
6097     assert((NumBitsPerElt % NumBitsPerSrcElt) == 0 && "Unexpected extension");
6098     unsigned Scale = NumBitsPerElt / NumBitsPerSrcElt;
6099     for (unsigned I = 0; I != NumElts; ++I)
6100       Mask.append(Scale, I);
6101     Ops.push_back(Src);
6102     return true;
6103   }
6104   case ISD::ZERO_EXTEND:
6105   case ISD::ANY_EXTEND:
6106   case ISD::ZERO_EXTEND_VECTOR_INREG:
6107   case ISD::ANY_EXTEND_VECTOR_INREG: {
6108     SDValue Src = N.getOperand(0);
6109     EVT SrcVT = Src.getValueType();
6110 
6111     // Extended source must be a simple vector.
6112     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6113         (SrcVT.getScalarSizeInBits() % 8) != 0)
6114       return false;
6115 
6116     bool IsAnyExtend =
6117         (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
6118     DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
6119                          IsAnyExtend, Mask);
6120     Ops.push_back(Src);
6121     return true;
6122   }
6123   }
6124 
6125   return false;
6126 }
6127 
6128 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
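/// Illustrative example: with two width-4 inputs where Inputs[1] == Inputs[0] and
/// Mask = {0, 5, 1, 4}, the duplicate input is dropped and the mask is remapped
/// to {0, 1, 1, 0} over the single remaining input.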
6129 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
6130                                               SmallVectorImpl<int> &Mask) {
6131   int MaskWidth = Mask.size();
6132   SmallVector<SDValue, 16> UsedInputs;
6133   for (int i = 0, e = Inputs.size(); i < e; ++i) {
6134     int lo = UsedInputs.size() * MaskWidth;
6135     int hi = lo + MaskWidth;
6136 
6137     // Strip UNDEF input usage.
6138     if (Inputs[i].isUndef())
6139       for (int &M : Mask)
6140         if ((lo <= M) && (M < hi))
6141           M = SM_SentinelUndef;
6142 
6143     // Check for unused inputs.
6144     if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
6145       for (int &M : Mask)
6146         if (lo <= M)
6147           M -= MaskWidth;
6148       continue;
6149     }
6150 
6151     // Check for repeated inputs.
6152     bool IsRepeat = false;
6153     for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
6154       if (UsedInputs[j] != Inputs[i])
6155         continue;
6156       for (int &M : Mask)
6157         if (lo <= M)
6158           M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
6159       IsRepeat = true;
6160       break;
6161     }
6162     if (IsRepeat)
6163       continue;
6164 
6165     UsedInputs.push_back(Inputs[i]);
6166   }
6167   Inputs = UsedInputs;
6168 }
6169 
6170 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
6171 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
6172 /// Returns true if the target shuffle mask was decoded.
6173 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6174                                    SmallVectorImpl<SDValue> &Inputs,
6175                                    SmallVectorImpl<int> &Mask,
6176                                    APInt &KnownUndef, APInt &KnownZero,
6177                                    const SelectionDAG &DAG, unsigned Depth,
6178                                    bool ResolveKnownElts) {
6179   if (Depth >= SelectionDAG::MaxRecursionDepth)
6180     return false; // Limit search depth.
6181 
6182   EVT VT = Op.getValueType();
6183   if (!VT.isSimple() || !VT.isVector())
6184     return false;
6185 
6186   if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
6187     if (ResolveKnownElts)
6188       resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
6189     return true;
6190   }
6191   if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
6192                          ResolveKnownElts)) {
6193     resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
6194     return true;
6195   }
6196   return false;
6197 }
6198 
6199 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6200                                    SmallVectorImpl<SDValue> &Inputs,
6201                                    SmallVectorImpl<int> &Mask,
6202                                    const SelectionDAG &DAG, unsigned Depth,
6203                                    bool ResolveKnownElts) {
6204   APInt KnownUndef, KnownZero;
6205   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
6206                                 KnownZero, DAG, Depth, ResolveKnownElts);
6207 }
6208 
6209 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
6210                                    SmallVectorImpl<int> &Mask,
6211                                    const SelectionDAG &DAG, unsigned Depth = 0,
6212                                    bool ResolveKnownElts = true) {
6213   EVT VT = Op.getValueType();
6214   if (!VT.isSimple() || !VT.isVector())
6215     return false;
6216 
6217   unsigned NumElts = Op.getValueType().getVectorNumElements();
6218   APInt DemandedElts = APInt::getAllOnes(NumElts);
6219   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
6220                                 ResolveKnownElts);
6221 }
6222 
6223 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
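// Illustrative example: given a simple 32-byte load, Offset == 16 with MemVT == f32
// forms a VBROADCAST_LOAD of the f32 at byte offset 16, reusing the original memop
// and keeping the memory ordering equivalent via makeEquivalentMemoryOrdering.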
6224 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
6225                                  EVT MemVT, MemSDNode *Mem, unsigned Offset,
6226                                  SelectionDAG &DAG) {
6227   assert((Opcode == X86ISD::VBROADCAST_LOAD ||
6228           Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
6229          "Unknown broadcast load type");
6230 
6231   // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
6232   if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
6233     return SDValue();
6234 
6235   SDValue Ptr = DAG.getMemBasePlusOffset(Mem->getBasePtr(),
6236                                          TypeSize::getFixed(Offset), DL);
6237   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
6238   SDValue Ops[] = {Mem->getChain(), Ptr};
6239   SDValue BcstLd = DAG.getMemIntrinsicNode(
6240       Opcode, DL, Tys, Ops, MemVT,
6241       DAG.getMachineFunction().getMachineMemOperand(
6242           Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
6243   DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
6244   return BcstLd;
6245 }
6246 
6247 /// Returns the scalar element that will make up the i'th
6248 /// element of the result of the vector shuffle.
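/// Illustrative example: for a v4i32 shuffle of (A, B) with mask <2, 3, 4, 7> and
/// Index == 2, this recurses into B and returns whatever makes up B's element 0.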
6249 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
6250                                    SelectionDAG &DAG, unsigned Depth) {
6251   if (Depth >= SelectionDAG::MaxRecursionDepth)
6252     return SDValue(); // Limit search depth.
6253 
6254   EVT VT = Op.getValueType();
6255   unsigned Opcode = Op.getOpcode();
6256   unsigned NumElems = VT.getVectorNumElements();
6257 
6258   // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
6259   if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
6260     int Elt = SV->getMaskElt(Index);
6261 
6262     if (Elt < 0)
6263       return DAG.getUNDEF(VT.getVectorElementType());
6264 
6265     SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
6266     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6267   }
6268 
6269   // Recurse into target specific vector shuffles to find scalars.
6270   if (isTargetShuffle(Opcode)) {
6271     MVT ShufVT = VT.getSimpleVT();
6272     MVT ShufSVT = ShufVT.getVectorElementType();
6273     int NumElems = (int)ShufVT.getVectorNumElements();
6274     SmallVector<int, 16> ShuffleMask;
6275     SmallVector<SDValue, 16> ShuffleOps;
6276     if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
6277                               ShuffleMask))
6278       return SDValue();
6279 
6280     int Elt = ShuffleMask[Index];
6281     if (Elt == SM_SentinelZero)
6282       return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
6283                                  : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
6284     if (Elt == SM_SentinelUndef)
6285       return DAG.getUNDEF(ShufSVT);
6286 
6287     assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
6288     SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
6289     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6290   }
6291 
6292   // Recurse into insert_subvector base/sub vector to find scalars.
6293   if (Opcode == ISD::INSERT_SUBVECTOR) {
6294     SDValue Vec = Op.getOperand(0);
6295     SDValue Sub = Op.getOperand(1);
6296     uint64_t SubIdx = Op.getConstantOperandVal(2);
6297     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
6298 
6299     if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
6300       return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
6301     return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
6302   }
6303 
6304   // Recurse into concat_vectors sub vector to find scalars.
6305   if (Opcode == ISD::CONCAT_VECTORS) {
6306     EVT SubVT = Op.getOperand(0).getValueType();
6307     unsigned NumSubElts = SubVT.getVectorNumElements();
6308     uint64_t SubIdx = Index / NumSubElts;
6309     uint64_t SubElt = Index % NumSubElts;
6310     return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
6311   }
6312 
6313   // Recurse into extract_subvector src vector to find scalars.
6314   if (Opcode == ISD::EXTRACT_SUBVECTOR) {
6315     SDValue Src = Op.getOperand(0);
6316     uint64_t SrcIdx = Op.getConstantOperandVal(1);
6317     return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
6318   }
6319 
6320   // We only peek through bitcasts of the same vector width.
6321   if (Opcode == ISD::BITCAST) {
6322     SDValue Src = Op.getOperand(0);
6323     EVT SrcVT = Src.getValueType();
6324     if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
6325       return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
6326     return SDValue();
6327   }
6328 
6329   // Actual nodes that may contain scalar elements
6330 
6331   // For insert_vector_elt - either return the index matching scalar or recurse
6332   // into the base vector.
6333   if (Opcode == ISD::INSERT_VECTOR_ELT &&
6334       isa<ConstantSDNode>(Op.getOperand(2))) {
6335     if (Op.getConstantOperandAPInt(2) == Index)
6336       return Op.getOperand(1);
6337     return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
6338   }
6339 
6340   if (Opcode == ISD::SCALAR_TO_VECTOR)
6341     return (Index == 0) ? Op.getOperand(0)
6342                         : DAG.getUNDEF(VT.getVectorElementType());
6343 
6344   if (Opcode == ISD::BUILD_VECTOR)
6345     return Op.getOperand(Index);
6346 
6347   return SDValue();
6348 }
6349 
6350 // Use PINSRB/PINSRW/PINSRD to create a build vector.
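// Illustrative example: a v8i16 build_vector that is zero except at indices 1 and 3
// starts from a zero vector and performs two element insertions (lowered to PINSRW).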
6351 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
6352                                         unsigned NumNonZero, unsigned NumZero,
6353                                         SelectionDAG &DAG,
6354                                         const X86Subtarget &Subtarget) {
6355   MVT VT = Op.getSimpleValueType();
6356   unsigned NumElts = VT.getVectorNumElements();
6357   assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
6358           ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
6359          "Illegal vector insertion");
6360 
6361   SDLoc dl(Op);
6362   SDValue V;
6363   bool First = true;
6364 
6365   for (unsigned i = 0; i < NumElts; ++i) {
6366     bool IsNonZero = NonZeroMask[i];
6367     if (!IsNonZero)
6368       continue;
6369 
6370     // If the build vector contains zeros or our first insertion is not the
6371     // first index, then insert into a zero vector to break any register
6372     // dependency; otherwise use SCALAR_TO_VECTOR.
6373     if (First) {
6374       First = false;
6375       if (NumZero || 0 != i)
6376         V = getZeroVector(VT, Subtarget, DAG, dl);
6377       else {
6378         assert(0 == i && "Expected insertion into zero-index");
6379         V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6380         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6381         V = DAG.getBitcast(VT, V);
6382         continue;
6383       }
6384     }
6385     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
6386                     DAG.getIntPtrConstant(i, dl));
6387   }
6388 
6389   return V;
6390 }
6391 
6392 /// Custom lower build_vector of v16i8.
6393 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
6394                                      unsigned NumNonZero, unsigned NumZero,
6395                                      SelectionDAG &DAG,
6396                                      const X86Subtarget &Subtarget) {
6397   if (NumNonZero > 8 && !Subtarget.hasSSE41())
6398     return SDValue();
6399 
6400   // SSE4.1 - use PINSRB to insert each byte directly.
6401   if (Subtarget.hasSSE41())
6402     return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6403                                     Subtarget);
6404 
6405   SDLoc dl(Op);
6406   SDValue V;
6407 
6408   // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
6409   // If both of the two lowest 16-bit halves contain non-zero bytes, use MOVD.
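  // Illustrative example: with non-zero bytes at indices 0 and 2, each byte is
  // zero-extended to i32, the byte at index 2 is shifted left by 16, the two are
  // ORed together and moved into the vector with a zero-extending MOVD.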
6410   if (!NonZeroMask.extractBits(2, 0).isZero() &&
6411       !NonZeroMask.extractBits(2, 2).isZero()) {
6412     for (unsigned I = 0; I != 4; ++I) {
6413       if (!NonZeroMask[I])
6414         continue;
6415       SDValue Elt = DAG.getZExtOrTrunc(Op.getOperand(I), dl, MVT::i32);
6416       if (I != 0)
6417         Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
6418                           DAG.getConstant(I * 8, dl, MVT::i8));
6419       V = V ? DAG.getNode(ISD::OR, dl, MVT::i32, V, Elt) : Elt;
6420     }
6421     assert(V && "Failed to fold v16i8 vector to zero");
6422     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6423     V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6424     V = DAG.getBitcast(MVT::v8i16, V);
6425   }
6426   for (unsigned i = V ? 4 : 0; i < 16; i += 2) {
6427     bool ThisIsNonZero = NonZeroMask[i];
6428     bool NextIsNonZero = NonZeroMask[i + 1];
6429     if (!ThisIsNonZero && !NextIsNonZero)
6430       continue;
6431 
6432     SDValue Elt;
6433     if (ThisIsNonZero) {
6434       if (NumZero || NextIsNonZero)
6435         Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6436       else
6437         Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6438     }
6439 
6440     if (NextIsNonZero) {
6441       SDValue NextElt = Op.getOperand(i + 1);
6442       if (i == 0 && NumZero)
6443         NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
6444       else
6445         NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
6446       NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
6447                             DAG.getConstant(8, dl, MVT::i8));
6448       if (ThisIsNonZero)
6449         Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
6450       else
6451         Elt = NextElt;
6452     }
6453 
6454     // If our first insertion is not the first index or zeros are needed, then
6455     // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
6456     // elements undefined).
6457     if (!V) {
6458       if (i != 0 || NumZero)
6459         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
6460       else {
6461         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
6462         V = DAG.getBitcast(MVT::v8i16, V);
6463         continue;
6464       }
6465     }
6466     Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
6467     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
6468                     DAG.getIntPtrConstant(i / 2, dl));
6469   }
6470 
6471   return DAG.getBitcast(MVT::v16i8, V);
6472 }
6473 
6474 /// Custom lower build_vector of v8i16.
6475 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
6476                                      unsigned NumNonZero, unsigned NumZero,
6477                                      SelectionDAG &DAG,
6478                                      const X86Subtarget &Subtarget) {
6479   if (NumNonZero > 4 && !Subtarget.hasSSE41())
6480     return SDValue();
6481 
6482   // Use PINSRW to insert each 16-bit element directly.
6483   return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6484                                   Subtarget);
6485 }
6486 
6487 /// Custom lower build_vector of v4i32 or v4f32.
6488 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
6489                                      const X86Subtarget &Subtarget) {
6490   // If this is a splat of a pair of elements, use MOVDDUP (unless the target
6491   // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
6492   // Because we're creating a less complicated build vector here, we may enable
6493   // further folding of the MOVDDUP via shuffle transforms.
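  // Illustrative example: build_vector (a, b, a, b) becomes
  // build_vector (a, b, undef, undef) -> bitcast v2f64 -> MOVDDUP -> bitcast back.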
6494   if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
6495       Op.getOperand(0) == Op.getOperand(2) &&
6496       Op.getOperand(1) == Op.getOperand(3) &&
6497       Op.getOperand(0) != Op.getOperand(1)) {
6498     SDLoc DL(Op);
6499     MVT VT = Op.getSimpleValueType();
6500     MVT EltVT = VT.getVectorElementType();
6501     // Create a new build vector with the first 2 elements followed by undef
6502     // padding, bitcast to v2f64, duplicate, and bitcast back.
6503     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
6504                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
6505     SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
6506     SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
6507     return DAG.getBitcast(VT, Dup);
6508   }
6509 
6510   // Find all zeroable elements.
6511   std::bitset<4> Zeroable, Undefs;
6512   for (int i = 0; i < 4; ++i) {
6513     SDValue Elt = Op.getOperand(i);
6514     Undefs[i] = Elt.isUndef();
6515     Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
6516   }
6517   assert(Zeroable.size() - Zeroable.count() > 1 &&
6518          "We expect at least two non-zero elements!");
6519 
6520   // We only know how to deal with build_vector nodes where elements are either
6521   // zeroable or extract_vector_elt with constant index.
6522   SDValue FirstNonZero;
6523   unsigned FirstNonZeroIdx;
6524   for (unsigned i = 0; i < 4; ++i) {
6525     if (Zeroable[i])
6526       continue;
6527     SDValue Elt = Op.getOperand(i);
6528     if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6529         !isa<ConstantSDNode>(Elt.getOperand(1)))
6530       return SDValue();
6531     // Make sure that this node is extracting from a 128-bit vector.
6532     MVT VT = Elt.getOperand(0).getSimpleValueType();
6533     if (!VT.is128BitVector())
6534       return SDValue();
6535     if (!FirstNonZero.getNode()) {
6536       FirstNonZero = Elt;
6537       FirstNonZeroIdx = i;
6538     }
6539   }
6540 
6541   assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
6542   SDValue V1 = FirstNonZero.getOperand(0);
6543   MVT VT = V1.getSimpleValueType();
6544 
6545   // See if this build_vector can be lowered as a blend with zero.
6546   SDValue Elt;
6547   unsigned EltMaskIdx, EltIdx;
6548   int Mask[4];
6549   for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
6550     if (Zeroable[EltIdx]) {
6551       // The zero vector will be on the right hand side.
6552       Mask[EltIdx] = EltIdx+4;
6553       continue;
6554     }
6555 
6556     Elt = Op->getOperand(EltIdx);
6557     // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
6558     EltMaskIdx = Elt.getConstantOperandVal(1);
6559     if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
6560       break;
6561     Mask[EltIdx] = EltIdx;
6562   }
6563 
6564   if (EltIdx == 4) {
6565     // Let the shuffle legalizer deal with blend operations.
6566     SDValue VZeroOrUndef = (Zeroable == Undefs)
6567                                ? DAG.getUNDEF(VT)
6568                                : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
6569     if (V1.getSimpleValueType() != VT)
6570       V1 = DAG.getBitcast(VT, V1);
6571     return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
6572   }
6573 
6574   // See if we can lower this build_vector to an INSERTPS.
6575   if (!Subtarget.hasSSE41())
6576     return SDValue();
6577 
6578   SDValue V2 = Elt.getOperand(0);
6579   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
6580     V1 = SDValue();
6581 
6582   bool CanFold = true;
6583   for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
6584     if (Zeroable[i])
6585       continue;
6586 
6587     SDValue Current = Op->getOperand(i);
6588     SDValue SrcVector = Current->getOperand(0);
6589     if (!V1.getNode())
6590       V1 = SrcVector;
6591     CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
6592   }
6593 
6594   if (!CanFold)
6595     return SDValue();
6596 
6597   assert(V1.getNode() && "Expected at least two non-zero elements!");
6598   if (V1.getSimpleValueType() != MVT::v4f32)
6599     V1 = DAG.getBitcast(MVT::v4f32, V1);
6600   if (V2.getSimpleValueType() != MVT::v4f32)
6601     V2 = DAG.getBitcast(MVT::v4f32, V2);
6602 
6603   // Ok, we can emit an INSERTPS instruction.
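  // The INSERTPS immediate packs the source element index into bits [7:6], the
  // destination element index into bits [5:4] and the zero mask into bits [3:0];
  // e.g. copying source element 2 into slot 1 with element 3 zeroed is 0x98.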
6604   unsigned ZMask = Zeroable.to_ulong();
6605 
6606   unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
6607   assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
6608   SDLoc DL(Op);
6609   SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
6610                                DAG.getIntPtrConstant(InsertPSMask, DL, true));
6611   return DAG.getBitcast(VT, Result);
6612 }
6613 
6614 /// Return a vector logical shift node.
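/// Illustrative example: a 32-bit left shift of the whole 128-bit vector is
/// emitted as VSHLDQ (PSLLDQ) by 4 bytes on a v16i8 bitcast of the source.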
6615 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
6616                          SelectionDAG &DAG, const TargetLowering &TLI,
6617                          const SDLoc &dl) {
6618   assert(VT.is128BitVector() && "Unknown type for VShift");
6619   MVT ShVT = MVT::v16i8;
6620   unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
6621   SrcOp = DAG.getBitcast(ShVT, SrcOp);
6622   assert(NumBits % 8 == 0 && "Only support byte sized shifts");
6623   SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
6624   return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
6625 }
6626 
6627 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
6628                                       SelectionDAG &DAG) {
6629 
6630   // Check if the scalar load can be widened into a vector load, and if the
6631   // address is "base + cst", see whether the cst can be "absorbed" into the
6632   // shuffle mask.
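  // Illustrative example (for a 128-bit VT): a 4-byte load at offset 8 into a
  // 16-byte-aligned stack slot can be widened to a v4i32 load of the whole slot,
  // with the splat shuffle selecting element 2 (EltNo == (8 - 0) >> 2).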
6633   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
6634     SDValue Ptr = LD->getBasePtr();
6635     if (!ISD::isNormalLoad(LD) || !LD->isSimple())
6636       return SDValue();
6637     EVT PVT = LD->getValueType(0);
6638     if (PVT != MVT::i32 && PVT != MVT::f32)
6639       return SDValue();
6640 
6641     int FI = -1;
6642     int64_t Offset = 0;
6643     if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
6644       FI = FINode->getIndex();
6645       Offset = 0;
6646     } else if (DAG.isBaseWithConstantOffset(Ptr) &&
6647                isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6648       FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6649       Offset = Ptr.getConstantOperandVal(1);
6650       Ptr = Ptr.getOperand(0);
6651     } else {
6652       return SDValue();
6653     }
6654 
6655     // FIXME: 256-bit vector instructions don't require strict alignment;
6656     // improve this code to support them better.
6657     Align RequiredAlign(VT.getSizeInBits() / 8);
6658     SDValue Chain = LD->getChain();
6659     // Make sure the stack object alignment is at least 16 or 32.
6660     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6661     MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
6662     if (!InferredAlign || *InferredAlign < RequiredAlign) {
6663       if (MFI.isFixedObjectIndex(FI)) {
6664         // Can't change the alignment. FIXME: It's possible to compute the
6665         // exact stack offset and reference FI + adjusted offset instead, if
6666         // someone *really* cares about this; that's the way to implement it.
6667         return SDValue();
6668       } else {
6669         MFI.setObjectAlignment(FI, RequiredAlign);
6670       }
6671     }
6672 
6673     // (Offset % 16 or 32) must be a multiple of 4. The address is then
6674     // Ptr + (Offset & ~15).
6675     if (Offset < 0)
6676       return SDValue();
6677     if ((Offset % RequiredAlign.value()) & 3)
6678       return SDValue();
6679     int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
6680     if (StartOffset) {
6681       SDLoc DL(Ptr);
6682       Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6683                         DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
6684     }
6685 
6686     int EltNo = (Offset - StartOffset) >> 2;
6687     unsigned NumElems = VT.getVectorNumElements();
6688 
6689     EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6690     SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6691                              LD->getPointerInfo().getWithOffset(StartOffset));
6692 
6693     SmallVector<int, 8> Mask(NumElems, EltNo);
6694 
6695     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
6696   }
6697 
6698   return SDValue();
6699 }
6700 
6701 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
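// Illustrative example: (trunc (srl (load i64 p), 16)) resolves to that load with
// ByteOffset == 2, since the 16-bit right shift skips the two lowest bytes.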
6702 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
6703   if (ISD::isNON_EXTLoad(Elt.getNode())) {
6704     auto *BaseLd = cast<LoadSDNode>(Elt);
6705     if (!BaseLd->isSimple())
6706       return false;
6707     Ld = BaseLd;
6708     ByteOffset = 0;
6709     return true;
6710   }
6711 
6712   switch (Elt.getOpcode()) {
6713   case ISD::BITCAST:
6714   case ISD::TRUNCATE:
6715   case ISD::SCALAR_TO_VECTOR:
6716     return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
6717   case ISD::SRL:
6718     if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6719       uint64_t Amt = AmtC->getZExtValue();
6720       if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
6721         ByteOffset += Amt / 8;
6722         return true;
6723       }
6724     }
6725     break;
6726   case ISD::EXTRACT_VECTOR_ELT:
6727     if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6728       SDValue Src = Elt.getOperand(0);
6729       unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
6730       unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
6731       if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
6732           findEltLoadSrc(Src, Ld, ByteOffset)) {
6733         uint64_t Idx = IdxC->getZExtValue();
6734         ByteOffset += Idx * (SrcSizeInBits / 8);
6735         return true;
6736       }
6737     }
6738     break;
6739   }
6740 
6741   return false;
6742 }
6743 
6744 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6745 /// elements can be replaced by a single large load which has the same value as
6746 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6747 ///
6748 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
6749 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6750                                         const SDLoc &DL, SelectionDAG &DAG,
6751                                         const X86Subtarget &Subtarget,
6752                                         bool IsAfterLegalize) {
6753   if ((VT.getScalarSizeInBits() % 8) != 0)
6754     return SDValue();
6755 
6756   unsigned NumElems = Elts.size();
6757 
6758   int LastLoadedElt = -1;
6759   APInt LoadMask = APInt::getZero(NumElems);
6760   APInt ZeroMask = APInt::getZero(NumElems);
6761   APInt UndefMask = APInt::getZero(NumElems);
6762 
6763   SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
6764   SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
6765 
6766   // For each element in the initializer, see if we've found a load, zero or an
6767   // undef.
6768   for (unsigned i = 0; i < NumElems; ++i) {
6769     SDValue Elt = peekThroughBitcasts(Elts[i]);
6770     if (!Elt.getNode())
6771       return SDValue();
6772     if (Elt.isUndef()) {
6773       UndefMask.setBit(i);
6774       continue;
6775     }
6776     if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
6777       ZeroMask.setBit(i);
6778       continue;
6779     }
6780 
6781     // Each loaded element must be the correct fractional portion of the
6782     // requested vector load.
6783     unsigned EltSizeInBits = Elt.getValueSizeInBits();
6784     if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
6785       return SDValue();
6786 
6787     if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
6788       return SDValue();
6789     unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
6790     if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
6791       return SDValue();
6792 
6793     LoadMask.setBit(i);
6794     LastLoadedElt = i;
6795   }
6796   assert((ZeroMask.popcount() + UndefMask.popcount() + LoadMask.popcount()) ==
6797              NumElems &&
6798          "Incomplete element masks");
6799 
6800   // Handle Special Cases - all undef or undef/zero.
6801   if (UndefMask.popcount() == NumElems)
6802     return DAG.getUNDEF(VT);
6803   if ((ZeroMask.popcount() + UndefMask.popcount()) == NumElems)
6804     return VT.isInteger() ? DAG.getConstant(0, DL, VT)
6805                           : DAG.getConstantFP(0.0, DL, VT);
6806 
6807   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6808   int FirstLoadedElt = LoadMask.countr_zero();
6809   SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
6810   EVT EltBaseVT = EltBase.getValueType();
6811   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
6812          "Register/Memory size mismatch");
6813   LoadSDNode *LDBase = Loads[FirstLoadedElt];
6814   assert(LDBase && "Did not find base load for merging consecutive loads");
6815   unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
6816   unsigned BaseSizeInBytes = BaseSizeInBits / 8;
6817   int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
6818   int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
6819   assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
6820 
6821   // TODO: Support offsetting the base load.
6822   if (ByteOffsets[FirstLoadedElt] != 0)
6823     return SDValue();
6824 
6825   // Check to see if the element's load is consecutive to the base load
6826   // or offset from a previous (already checked) load.
6827   auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
6828     LoadSDNode *Ld = Loads[EltIdx];
6829     int64_t ByteOffset = ByteOffsets[EltIdx];
6830     if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
6831       int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
6832       return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
6833               Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
6834     }
6835     return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
6836                                               EltIdx - FirstLoadedElt);
6837   };
6838 
6839   // Consecutive loads can contain UNDEFs but not ZERO elements.
6840   // Consecutive loads with UNDEF and ZERO elements require an additional
6841   // shuffle stage to clear the ZERO elements.
6842   bool IsConsecutiveLoad = true;
6843   bool IsConsecutiveLoadWithZeros = true;
6844   for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
6845     if (LoadMask[i]) {
6846       if (!CheckConsecutiveLoad(LDBase, i)) {
6847         IsConsecutiveLoad = false;
6848         IsConsecutiveLoadWithZeros = false;
6849         break;
6850       }
6851     } else if (ZeroMask[i]) {
6852       IsConsecutiveLoad = false;
6853     }
6854   }
6855 
6856   auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
6857     auto MMOFlags = LDBase->getMemOperand()->getFlags();
6858     assert(LDBase->isSimple() &&
6859            "Cannot merge volatile or atomic loads.");
6860     SDValue NewLd =
6861         DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6862                     LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
6863                     MMOFlags);
6864     for (auto *LD : Loads)
6865       if (LD)
6866         DAG.makeEquivalentMemoryOrdering(LD, NewLd);
6867     return NewLd;
6868   };
6869 
6870   // Check if the base load is entirely dereferenceable.
6871   bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
6872       VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
6873 
6874   // LOAD - all consecutive load/undefs (must start/end with a load or be
6875   // entirely dereferenceable). If we have found an entire vector of loads and
6876   // undefs, then return a large load of the entire vector width starting at the
6877   // base pointer. If the vector contains zeros, then attempt to shuffle those
6878   // elements.
6879   if (FirstLoadedElt == 0 &&
6880       (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
6881       (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
6882     if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
6883       return SDValue();
6884 
6885     // Don't create 256-bit non-temporal aligned loads without AVX2 as these
6886     // will lower to regular temporal loads and use the cache.
6887     if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
6888         VT.is256BitVector() && !Subtarget.hasInt256())
6889       return SDValue();
6890 
6891     if (NumElems == 1)
6892       return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
6893 
6894     if (!ZeroMask)
6895       return CreateLoad(VT, LDBase);
6896 
6897     // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
6898     // vector and a zero vector to clear out the zero elements.
6899     if (!IsAfterLegalize && VT.isVector()) {
6900       unsigned NumMaskElts = VT.getVectorNumElements();
6901       if ((NumMaskElts % NumElems) == 0) {
6902         unsigned Scale = NumMaskElts / NumElems;
6903         SmallVector<int, 4> ClearMask(NumMaskElts, -1);
6904         for (unsigned i = 0; i < NumElems; ++i) {
6905           if (UndefMask[i])
6906             continue;
6907           int Offset = ZeroMask[i] ? NumMaskElts : 0;
6908           for (unsigned j = 0; j != Scale; ++j)
6909             ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
6910         }
6911         SDValue V = CreateLoad(VT, LDBase);
6912         SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
6913                                    : DAG.getConstantFP(0.0, DL, VT);
6914         return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
6915       }
6916     }
6917   }
6918 
6919   // If the upper half of a ymm/zmm load is undef then just load the lower half.
6920   if (VT.is256BitVector() || VT.is512BitVector()) {
6921     unsigned HalfNumElems = NumElems / 2;
6922     if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
6923       EVT HalfVT =
6924           EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
6925       SDValue HalfLD =
6926           EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
6927                                    DAG, Subtarget, IsAfterLegalize);
6928       if (HalfLD)
6929         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
6930                            HalfLD, DAG.getIntPtrConstant(0, DL));
6931     }
6932   }
6933 
6934   // VZEXT_LOAD - consecutive 16/32/64-bit load/undefs followed by zeros/undefs.
6935   if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
6936       ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
6937        LoadSizeInBits == 64) &&
6938       ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
6939     MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
6940                                       : MVT::getIntegerVT(LoadSizeInBits);
6941     MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
6942     // Allow v4f32 on SSE1 only targets.
6943     // FIXME: Add more isel patterns so we can just use VT directly.
6944     if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
6945       VecVT = MVT::v4f32;
6946     if (TLI.isTypeLegal(VecVT)) {
6947       SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
6948       SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6949       SDValue ResNode = DAG.getMemIntrinsicNode(
6950           X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
6951           LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
6952       for (auto *LD : Loads)
6953         if (LD)
6954           DAG.makeEquivalentMemoryOrdering(LD, ResNode);
6955       return DAG.getBitcast(VT, ResNode);
6956     }
6957   }
6958 
6959   // BROADCAST - match the smallest possible repetition pattern, load that
6960   // scalar/subvector element and then broadcast to the entire vector.
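  // Illustrative example: a v8i32 built from loads <a, b, a, b, a, b, a, b>
  // repeats every two elements (RepeatSize == 64), so the pair is loaded once as a
  // 64-bit scalar and broadcast across the vector.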
6961   if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
6962       (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
6963     for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
6964       unsigned RepeatSize = SubElems * BaseSizeInBits;
6965       unsigned ScalarSize = std::min(RepeatSize, 64u);
6966       if (!Subtarget.hasAVX2() && ScalarSize < 32)
6967         continue;
6968 
6969       // Don't attempt a 1:N subvector broadcast - it should be caught by
6970       // combineConcatVectorOps, else it will cause infinite loops.
6971       if (RepeatSize > ScalarSize && SubElems == 1)
6972         continue;
6973 
6974       bool Match = true;
6975       SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
6976       for (unsigned i = 0; i != NumElems && Match; ++i) {
6977         if (!LoadMask[i])
6978           continue;
6979         SDValue Elt = peekThroughBitcasts(Elts[i]);
6980         if (RepeatedLoads[i % SubElems].isUndef())
6981           RepeatedLoads[i % SubElems] = Elt;
6982         else
6983           Match &= (RepeatedLoads[i % SubElems] == Elt);
6984       }
6985 
6986       // We must have loads at both ends of the repetition.
6987       Match &= !RepeatedLoads.front().isUndef();
6988       Match &= !RepeatedLoads.back().isUndef();
6989       if (!Match)
6990         continue;
6991 
6992       EVT RepeatVT =
6993           VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
6994               ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
6995               : EVT::getFloatingPointVT(ScalarSize);
6996       if (RepeatSize > ScalarSize)
6997         RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
6998                                     RepeatSize / ScalarSize);
6999       EVT BroadcastVT =
7000           EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
7001                            VT.getSizeInBits() / ScalarSize);
7002       if (TLI.isTypeLegal(BroadcastVT)) {
7003         if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
7004                 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
7005           SDValue Broadcast = RepeatLoad;
7006           if (RepeatSize > ScalarSize) {
7007             while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
7008               Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
7009           } else {
7010             if (!Subtarget.hasAVX2() &&
7011                 !X86::mayFoldLoadIntoBroadcastFromMem(
7012                     RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
7013                     Subtarget,
7014                     /*AssumeSingleUse=*/true))
7015               return SDValue();
7016             Broadcast =
7017                 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
7018           }
7019           return DAG.getBitcast(VT, Broadcast);
7020         }
7021       }
7022     }
7023   }
7024 
7025   return SDValue();
7026 }
7027 
7028 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
7029 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
7030 // are consecutive, non-overlapping, and in the right order.
7031 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
7032                                          SelectionDAG &DAG,
7033                                          const X86Subtarget &Subtarget,
7034                                          bool IsAfterLegalize) {
7035   SmallVector<SDValue, 64> Elts;
7036   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7037     if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
7038       Elts.push_back(Elt);
7039       continue;
7040     }
7041     return SDValue();
7042   }
7043   assert(Elts.size() == VT.getVectorNumElements());
7044   return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
7045                                   IsAfterLegalize);
7046 }
7047 
7048 static Constant *getConstantVector(MVT VT, ArrayRef<APInt> Bits,
7049                                    const APInt &Undefs, LLVMContext &C) {
7050   unsigned ScalarSize = VT.getScalarSizeInBits();
7051   Type *Ty = EVT(VT.getScalarType()).getTypeForEVT(C);
7052 
7053   auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7054     if (VT.isFloatingPoint()) {
7055       if (ScalarSize == 16)
7056         return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7057       if (ScalarSize == 32)
7058         return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7059       assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7060       return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7061     }
7062     return Constant::getIntegerValue(Ty, Val);
7063   };
7064 
7065   SmallVector<Constant *, 32> ConstantVec;
7066   for (unsigned I = 0, E = Bits.size(); I != E; ++I)
7067     ConstantVec.push_back(Undefs[I] ? UndefValue::get(Ty)
7068                                     : getConstantScalar(Bits[I]));
7069 
7070   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7071 }
7072 
7073 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
7074                                    unsigned SplatBitSize, LLVMContext &C) {
7075   unsigned ScalarSize = VT.getScalarSizeInBits();
7076 
7077   auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7078     if (VT.isFloatingPoint()) {
7079       if (ScalarSize == 16)
7080         return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7081       if (ScalarSize == 32)
7082         return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7083       assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7084       return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7085     }
7086     return Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
7087   };
7088 
7089   if (ScalarSize == SplatBitSize)
7090     return getConstantScalar(SplatValue);
7091 
7092   unsigned NumElm = SplatBitSize / ScalarSize;
7093   SmallVector<Constant *, 32> ConstantVec;
7094   for (unsigned I = 0; I != NumElm; ++I) {
7095     APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * I);
7096     ConstantVec.push_back(getConstantScalar(Val));
7097   }
7098   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7099 }
7100 
7101 static bool isFoldableUseOfShuffle(SDNode *N) {
7102   for (auto *U : N->uses()) {
7103     unsigned Opc = U->getOpcode();
7104     // VPERMV/VPERMV3 shuffles can never fold their index operands.
7105     if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
7106       return false;
7107     if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
7108       return false;
7109     if (isTargetShuffle(Opc))
7110       return true;
7111     if (Opc == ISD::BITCAST) // Ignore bitcasts
7112       return isFoldableUseOfShuffle(U);
7113     if (N->hasOneUse()) {
7114       // TODO: there may be some general way to know if an SDNode can be
7115       // folded. For now we only know whether an MI is foldable.
7116       if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
7117         return false;
7118       return true;
7119     }
7120   }
7121   return false;
7122 }
7123 
7124 /// Attempt to use the vbroadcast instruction to generate a splat value
7125 /// from a splat BUILD_VECTOR which uses:
7126 ///  a. A single scalar load, or a constant.
7127 ///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
7128 ///
7129 /// The VBROADCAST node is returned when a pattern is found,
7130 /// or SDValue() otherwise.
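/// Illustrative example (with AVX2): a v8i32 splat of one loaded scalar becomes a
/// single broadcast of that load instead of a full build_vector expansion.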
7131 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
7132                                            const X86Subtarget &Subtarget,
7133                                            SelectionDAG &DAG) {
7134   // VBROADCAST requires AVX.
7135   // TODO: Splats could be generated for non-AVX CPUs using SSE
7136   // instructions, but there's less potential gain for only 128-bit vectors.
7137   if (!Subtarget.hasAVX())
7138     return SDValue();
7139 
7140   MVT VT = BVOp->getSimpleValueType(0);
7141   unsigned NumElts = VT.getVectorNumElements();
7142   SDLoc dl(BVOp);
7143 
7144   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7145          "Unsupported vector type for broadcast.");
7146 
7147   // See if the build vector is a repeating sequence of scalars (inc. splat).
7148   SDValue Ld;
7149   BitVector UndefElements;
7150   SmallVector<SDValue, 16> Sequence;
7151   if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
7152     assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
7153     if (Sequence.size() == 1)
7154       Ld = Sequence[0];
7155   }
7156 
7157   // Attempt to use VBROADCASTM
7158   // From this pattern:
7159   // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
7160   // b. t1 = (build_vector t0 t0)
7161   //
7162   // Create (VBROADCASTM v2i1 X)
7163   if (!Sequence.empty() && Subtarget.hasCDI()) {
7164     // If not a splat, are the upper sequence values zeroable?
7165     unsigned SeqLen = Sequence.size();
7166     bool UpperZeroOrUndef =
7167         SeqLen == 1 ||
7168         llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
7169           return !V || V.isUndef() || isNullConstant(V);
7170         });
7171     SDValue Op0 = Sequence[0];
7172     if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
7173                              (Op0.getOpcode() == ISD::ZERO_EXTEND &&
7174                               Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
7175       SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
7176                              ? Op0.getOperand(0)
7177                              : Op0.getOperand(0).getOperand(0);
7178       MVT MaskVT = BOperand.getSimpleValueType();
7179       MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
7180       if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) ||  // for broadcastmb2q
7181           (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
7182         MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
7183         if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
7184           unsigned Scale = 512 / VT.getSizeInBits();
7185           BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
7186         }
7187         SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
7188         if (BcstVT.getSizeInBits() != VT.getSizeInBits())
7189           Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
7190         return DAG.getBitcast(VT, Bcst);
7191       }
7192     }
7193   }
7194 
7195   unsigned NumUndefElts = UndefElements.count();
7196   if (!Ld || (NumElts - NumUndefElts) <= 1) {
7197     APInt SplatValue, Undef;
7198     unsigned SplatBitSize;
7199     bool HasUndef;
7200     // Check if this is a repeated constant pattern suitable for broadcasting.
7201     if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
7202         SplatBitSize > VT.getScalarSizeInBits() &&
7203         SplatBitSize < VT.getSizeInBits()) {
7204       // Avoid replacing with broadcast when it's a use of a shuffle
7205       // instruction to preserve the present custom lowering of shuffles.
7206       if (isFoldableUseOfShuffle(BVOp))
7207         return SDValue();
7208       // Replace the BUILD_VECTOR with a broadcast of the repeated constants.
7209       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7210       LLVMContext *Ctx = DAG.getContext();
7211       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
7212       if (SplatBitSize == 32 || SplatBitSize == 64 ||
7213           (SplatBitSize < 32 && Subtarget.hasAVX2())) {
7214         // Load the constant scalar/subvector and broadcast it.
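        // Illustrative example: a v8i32 with the repeating pattern <1, 2> has
        // SplatBitSize == 64, so one i64 constant-pool element is loaded and
        // broadcast to fill the 256-bit vector.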
7215         MVT CVT = MVT::getIntegerVT(SplatBitSize);
7216         Constant *C = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7217         SDValue CP = DAG.getConstantPool(C, PVT);
7218         unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7219 
7220         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7221         SDVTList Tys = DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
7222         SDValue Ops[] = {DAG.getEntryNode(), CP};
7223         MachinePointerInfo MPI =
7224             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7225         SDValue Brdcst =
7226             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7227                                     MPI, Alignment, MachineMemOperand::MOLoad);
7228         return DAG.getBitcast(VT, Brdcst);
7229       }
7230       if (SplatBitSize > 64) {
7231         // Load the vector of constants and broadcast it.
7232         Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7233         SDValue VCP = DAG.getConstantPool(VecC, PVT);
7234         unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
7235         MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
7236         Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
7237         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7238         SDValue Ops[] = {DAG.getEntryNode(), VCP};
7239         MachinePointerInfo MPI =
7240             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7241         return DAG.getMemIntrinsicNode(X86ISD::SUBV_BROADCAST_LOAD, dl, Tys,
7242                                        Ops, VVT, MPI, Alignment,
7243                                        MachineMemOperand::MOLoad);
7244       }
7245     }
7246 
7247     // If we are moving a scalar into a vector (Ld must be set and all elements
7248     // but 1 are undef) and that operation is not obviously supported by
7249     // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
7250     // That's better than general shuffling and may eliminate a load to GPR and
7251     // move from scalar to vector register.
7252     if (!Ld || NumElts - NumUndefElts != 1)
7253       return SDValue();
7254     unsigned ScalarSize = Ld.getValueSizeInBits();
7255     if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
7256       return SDValue();
7257   }
7258 
7259   bool ConstSplatVal =
7260       (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
7261   bool IsLoad = ISD::isNormalLoad(Ld.getNode());
7262 
7263   // TODO: Handle broadcasts of non-constant sequences.
7264 
7265   // Make sure that all of the users of a non-constant load are from the
7266   // BUILD_VECTOR node.
7267   // FIXME: Is the use count needed for non-constant, non-load case?
7268   if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
7269     return SDValue();
7270 
7271   unsigned ScalarSize = Ld.getValueSizeInBits();
7272   bool IsGE256 = (VT.getSizeInBits() >= 256);
7273 
7274   // When optimizing for size, generate up to 5 extra bytes for a broadcast
7275   // instruction to save 8 or more bytes of constant pool data.
7276   // TODO: If multiple splats are generated to load the same constant,
7277   // it may be detrimental to overall size. There needs to be a way to detect
7278   // that condition to know if this is truly a size win.
7279   bool OptForSize = DAG.shouldOptForSize();
7280 
7281   // Handle broadcasting a single constant scalar from the constant pool
7282   // into a vector.
7283   // On Sandybridge (no AVX2), it is still better to load a constant vector
7284   // from the constant pool and not to broadcast it from a scalar.
7285   // But override that restriction when optimizing for size.
7286   // TODO: Check if splatting is recommended for other AVX-capable CPUs.
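  // For example (an illustrative sketch): with AVX2, a v8f32 splat of the
  // constant 42.0f can be emitted as a VBROADCAST_LOAD of the single f32
  // constant-pool entry instead of loading a full 32-byte constant vector.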
7287   if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
7288     EVT CVT = Ld.getValueType();
7289     assert(!CVT.isVector() && "Must not broadcast a vector type");
7290 
7291     // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
7292     // For size optimization, also splat v2f64 and v2i64, and for size opt
7293     // with AVX2, also splat i8 and i16.
7294     // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
7295     if (ScalarSize == 32 ||
7296         (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
7297         CVT == MVT::f16 ||
7298         (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
7299       const Constant *C = nullptr;
7300       if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
7301         C = CI->getConstantIntValue();
7302       else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
7303         C = CF->getConstantFPValue();
7304 
7305       assert(C && "Invalid constant type");
7306 
7307       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7308       SDValue CP =
7309           DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
7310       Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7311 
7312       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7313       SDValue Ops[] = {DAG.getEntryNode(), CP};
7314       MachinePointerInfo MPI =
7315           MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7316       return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7317                                      MPI, Alignment, MachineMemOperand::MOLoad);
7318     }
7319   }
7320 
7321   // Handle AVX2 in-register broadcasts.
7322   if (!IsLoad && Subtarget.hasInt256() &&
7323       (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
7324     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7325 
7326   // The scalar source must be a normal load.
7327   if (!IsLoad)
7328     return SDValue();
7329 
7330   // Make sure the non-chain result is only used by this build vector.
7331   if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
7332     return SDValue();
7333 
7334   if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
7335       (Subtarget.hasVLX() && ScalarSize == 64)) {
7336     auto *LN = cast<LoadSDNode>(Ld);
7337     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7338     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7339     SDValue BCast =
7340         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7341                                 LN->getMemoryVT(), LN->getMemOperand());
7342     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7343     return BCast;
7344   }
7345 
7346   // The integer check is needed for the 64-bit into 128-bit case, so that it
7347   // doesn't match f64, since there is no vbroadcastsd xmm instruction.
7348   if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
7349       (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
7350     auto *LN = cast<LoadSDNode>(Ld);
7351     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7352     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7353     SDValue BCast =
7354         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7355                                 LN->getMemoryVT(), LN->getMemOperand());
7356     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7357     return BCast;
7358   }
7359 
7360   if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
7361     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7362 
7363   // Unsupported broadcast.
7364   return SDValue();
7365 }
7366 
7367 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
7368 /// underlying vector and index.
7369 ///
7370 /// Modifies \p ExtractedFromVec to the real vector and returns the real
7371 /// index.
7372 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
7373                                          SDValue ExtIdx) {
7374   int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
7375   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
7376     return Idx;
7377 
7378   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
7379   // lowered this:
7380   //   (extract_vector_elt (v8f32 %1), Constant<6>)
7381   // to:
7382   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
7383   //                           (extract_subvector (v8f32 %0), Constant<4>),
7384   //                           undef)
7385   //                       Constant<0>)
7386   // In this case the vector is the extract_subvector expression and the index
7387   // is 2, as specified by the shuffle.
7388   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
7389   SDValue ShuffleVec = SVOp->getOperand(0);
7390   MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
7391   assert(ShuffleVecVT.getVectorElementType() ==
7392          ExtractedFromVec.getSimpleValueType().getVectorElementType());
7393 
7394   int ShuffleIdx = SVOp->getMaskElt(Idx);
7395   if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
7396     ExtractedFromVec = ShuffleVec;
7397     return ShuffleIdx;
7398   }
7399   return Idx;
7400 }
7401 
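// An illustrative sketch of what buildFromShuffleMostly handles: a
// build_vector such as
//   (build_vector (extract_elt %A, 0), (extract_elt %A, 1), %s,
//                 (extract_elt %A, 3))
// becomes a shuffle of %A with mask <0,1,u,3> followed by an
// insert_vector_elt of the scalar %s at index 2 (%A and %s are placeholders).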
7402 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
7403   MVT VT = Op.getSimpleValueType();
7404 
7405   // Skip if insert_vec_elt is not supported.
7406   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7407   if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
7408     return SDValue();
7409 
7410   SDLoc DL(Op);
7411   unsigned NumElems = Op.getNumOperands();
7412 
7413   SDValue VecIn1;
7414   SDValue VecIn2;
7415   SmallVector<unsigned, 4> InsertIndices;
7416   SmallVector<int, 8> Mask(NumElems, -1);
7417 
7418   for (unsigned i = 0; i != NumElems; ++i) {
7419     unsigned Opc = Op.getOperand(i).getOpcode();
7420 
7421     if (Opc == ISD::UNDEF)
7422       continue;
7423 
7424     if (Opc != ISD::EXTRACT_VECTOR_ELT) {
7425       // Quit if more than 1 element needs inserting.
7426       if (InsertIndices.size() > 1)
7427         return SDValue();
7428 
7429       InsertIndices.push_back(i);
7430       continue;
7431     }
7432 
7433     SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
7434     SDValue ExtIdx = Op.getOperand(i).getOperand(1);
7435 
7436     // Quit if non-constant index.
7437     if (!isa<ConstantSDNode>(ExtIdx))
7438       return SDValue();
7439     int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
7440 
7441     // Quit if extracted from vector of different type.
7442     if (ExtractedFromVec.getValueType() != VT)
7443       return SDValue();
7444 
7445     if (!VecIn1.getNode())
7446       VecIn1 = ExtractedFromVec;
7447     else if (VecIn1 != ExtractedFromVec) {
7448       if (!VecIn2.getNode())
7449         VecIn2 = ExtractedFromVec;
7450       else if (VecIn2 != ExtractedFromVec)
7451         // Quit if there are more than 2 vectors to shuffle.
7452         return SDValue();
7453     }
7454 
7455     if (ExtractedFromVec == VecIn1)
7456       Mask[i] = Idx;
7457     else if (ExtractedFromVec == VecIn2)
7458       Mask[i] = Idx + NumElems;
7459   }
7460 
7461   if (!VecIn1.getNode())
7462     return SDValue();
7463 
7464   VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
7465   SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
7466 
7467   for (unsigned Idx : InsertIndices)
7468     NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
7469                      DAG.getIntPtrConstant(Idx, DL));
7470 
7471   return NV;
7472 }
7473 
7474 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
7475 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
7476                                        const X86Subtarget &Subtarget) {
7477   MVT VT = Op.getSimpleValueType();
7478   MVT IVT = VT.changeVectorElementTypeToInteger();
7479   SmallVector<SDValue, 16> NewOps;
7480   for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
7481     NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
7482   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
7483   return DAG.getBitcast(VT, Res);
7484 }
7485 
7486 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
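// An illustrative sketch: a mostly-constant (v8i1 build_vector 1,0,1,1,0,0,1,%c)
// is materialized as the i8 immediate 0b01001101 bitcast to v8i1, followed by a
// single insert_vector_elt of %c at index 7 (%c is a placeholder).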
7487 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
7488                                      const X86Subtarget &Subtarget) {
7489 
7490   MVT VT = Op.getSimpleValueType();
7491   assert((VT.getVectorElementType() == MVT::i1) &&
7492          "Unexpected type in LowerBUILD_VECTORvXi1!");
7493 
7494   SDLoc dl(Op);
7495   if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
7496       ISD::isBuildVectorAllOnes(Op.getNode()))
7497     return Op;
7498 
7499   uint64_t Immediate = 0;
7500   SmallVector<unsigned, 16> NonConstIdx;
7501   bool IsSplat = true;
7502   bool HasConstElts = false;
7503   int SplatIdx = -1;
7504   for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
7505     SDValue In = Op.getOperand(idx);
7506     if (In.isUndef())
7507       continue;
7508     if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
7509       Immediate |= (InC->getZExtValue() & 0x1) << idx;
7510       HasConstElts = true;
7511     } else {
7512       NonConstIdx.push_back(idx);
7513     }
7514     if (SplatIdx < 0)
7515       SplatIdx = idx;
7516     else if (In != Op.getOperand(SplatIdx))
7517       IsSplat = false;
7518   }
7519 
7520   // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
7521   if (IsSplat) {
7522     // The build_vector allows the scalar element to be larger than the vector
7523     // element type. We need to mask it to use as a condition unless we know
7524     // the upper bits are zero.
7525     // FIXME: Use computeKnownBits instead of checking specific opcode?
7526     SDValue Cond = Op.getOperand(SplatIdx);
7527     assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
7528     if (Cond.getOpcode() != ISD::SETCC)
7529       Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
7530                          DAG.getConstant(1, dl, MVT::i8));
7531 
7532     // Perform the select in the scalar domain so we can use cmov.
7533     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7534       SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
7535                                      DAG.getAllOnesConstant(dl, MVT::i32),
7536                                      DAG.getConstant(0, dl, MVT::i32));
7537       Select = DAG.getBitcast(MVT::v32i1, Select);
7538       return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
7539     } else {
7540       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7541       SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
7542                                      DAG.getAllOnesConstant(dl, ImmVT),
7543                                      DAG.getConstant(0, dl, ImmVT));
7544       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7545       Select = DAG.getBitcast(VecVT, Select);
7546       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
7547                          DAG.getIntPtrConstant(0, dl));
7548     }
7549   }
7550 
7551   // Insert the remaining non-constant elements one by one.
7552   SDValue DstVec;
7553   if (HasConstElts) {
7554     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7555       SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
7556       SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
7557       ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
7558       ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
7559       DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
7560     } else {
7561       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7562       SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
7563       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7564       DstVec = DAG.getBitcast(VecVT, Imm);
7565       DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
7566                            DAG.getIntPtrConstant(0, dl));
7567     }
7568   } else
7569     DstVec = DAG.getUNDEF(VT);
7570 
7571   for (unsigned InsertIdx : NonConstIdx) {
7572     DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
7573                          Op.getOperand(InsertIdx),
7574                          DAG.getIntPtrConstant(InsertIdx, dl));
7575   }
7576   return DstVec;
7577 }
7578 
7579 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
7580   switch (Opcode) {
7581   case X86ISD::PACKSS:
7582   case X86ISD::PACKUS:
7583   case X86ISD::FHADD:
7584   case X86ISD::FHSUB:
7585   case X86ISD::HADD:
7586   case X86ISD::HSUB:
7587     return true;
7588   }
7589   return false;
7590 }
7591 
7592 /// This is a helper function of LowerToHorizontalOp().
7593 /// This function checks whether the input build_vector \p N implements a
7594 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
7595 /// may not match the layout of an x86 256-bit horizontal instruction.
7596 /// In other words, if this returns true, then some extraction/insertion will
7597 /// be required to produce a valid horizontal instruction.
7598 ///
7599 /// Parameter \p Opcode defines the kind of horizontal operation to match.
7600 /// For example, if \p Opcode is equal to ISD::ADD, then this function
7601 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
7602 /// is equal to ISD::SUB, then this function checks if this is a horizontal
7603 /// arithmetic sub.
7604 ///
7605 /// This function only analyzes elements of \p N whose indices are
7606 /// in range [BaseIdx, LastIdx).
7607 ///
7608 /// TODO: This function was originally used to match both real and fake partial
7609 /// horizontal operations, but the index-matching logic is incorrect for that.
7610 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
7611 /// code because it is only used for partial h-op matching now?
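/// For example (an illustrative sketch): with \p Opcode == ISD::ADD,
/// \p BaseIdx == 0 and \p LastIdx == 4 on a v8i32 build_vector, elements 0-3
/// must look like
///   (add (extract_vector_elt A, 0), (extract_vector_elt A, 1))
///   (add (extract_vector_elt A, 2), (extract_vector_elt A, 3))
///   (add (extract_vector_elt B, 0), (extract_vector_elt B, 1))
///   (add (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// with A written to \p V0 and B written to \p V1.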
7612 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
7613                                   SelectionDAG &DAG,
7614                                   unsigned BaseIdx, unsigned LastIdx,
7615                                   SDValue &V0, SDValue &V1) {
7616   EVT VT = N->getValueType(0);
7617   assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
7618   assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
7619   assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
7620          "Invalid Vector in input!");
7621 
7622   bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
7623   bool CanFold = true;
7624   unsigned ExpectedVExtractIdx = BaseIdx;
7625   unsigned NumElts = LastIdx - BaseIdx;
7626   V0 = DAG.getUNDEF(VT);
7627   V1 = DAG.getUNDEF(VT);
7628 
7629   // Check if N implements a horizontal binop.
7630   for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
7631     SDValue Op = N->getOperand(i + BaseIdx);
7632 
7633     // Skip UNDEFs.
7634     if (Op->isUndef()) {
7635       // Update the expected vector extract index.
7636       if (i * 2 == NumElts)
7637         ExpectedVExtractIdx = BaseIdx;
7638       ExpectedVExtractIdx += 2;
7639       continue;
7640     }
7641 
7642     CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
7643 
7644     if (!CanFold)
7645       break;
7646 
7647     SDValue Op0 = Op.getOperand(0);
7648     SDValue Op1 = Op.getOperand(1);
7649 
7650     // Try to match the following pattern:
7651     // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
7652     CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7653         Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7654         Op0.getOperand(0) == Op1.getOperand(0) &&
7655         isa<ConstantSDNode>(Op0.getOperand(1)) &&
7656         isa<ConstantSDNode>(Op1.getOperand(1)));
7657     if (!CanFold)
7658       break;
7659 
7660     unsigned I0 = Op0.getConstantOperandVal(1);
7661     unsigned I1 = Op1.getConstantOperandVal(1);
7662 
7663     if (i * 2 < NumElts) {
7664       if (V0.isUndef()) {
7665         V0 = Op0.getOperand(0);
7666         if (V0.getValueType() != VT)
7667           return false;
7668       }
7669     } else {
7670       if (V1.isUndef()) {
7671         V1 = Op0.getOperand(0);
7672         if (V1.getValueType() != VT)
7673           return false;
7674       }
7675       if (i * 2 == NumElts)
7676         ExpectedVExtractIdx = BaseIdx;
7677     }
7678 
7679     SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
7680     if (I0 == ExpectedVExtractIdx)
7681       CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
7682     else if (IsCommutable && I1 == ExpectedVExtractIdx) {
7683       // Try to match the following dag sequence:
7684       // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
7685       CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
7686     } else
7687       CanFold = false;
7688 
7689     ExpectedVExtractIdx += 2;
7690   }
7691 
7692   return CanFold;
7693 }
7694 
7695 /// Emit a sequence of two 128-bit horizontal add/sub followed by
7696 /// a concat_vector.
7697 ///
7698 /// This is a helper function of LowerToHorizontalOp().
7699 /// This function expects two 256-bit vectors called V0 and V1.
7700 /// At first, each vector is split into two separate 128-bit vectors.
7701 /// Then, the resulting 128-bit vectors are used to implement two
7702 /// horizontal binary operations.
7703 ///
7704 /// The kind of horizontal binary operation is defined by \p X86Opcode.
7705 ///
7706 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
7707 /// the two new horizontal binops.
7708 /// When Mode is set, the first horizontal binop dag node would take as input
7709 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
7710 /// horizontal binop dag node would take as input the lower 128-bit of V1
7711 /// and the upper 128-bit of V1.
7712 ///   Example:
7713 ///     HADD V0_LO, V0_HI
7714 ///     HADD V1_LO, V1_HI
7715 ///
7716 /// Otherwise, the first horizontal binop dag node takes as input the lower
7717 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
7718 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
7719 ///   Example:
7720 ///     HADD V0_LO, V1_LO
7721 ///     HADD V0_HI, V1_HI
7722 ///
7723 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
7724 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
7725 /// the upper 128-bits of the result.
7726 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
7727                                      const SDLoc &DL, SelectionDAG &DAG,
7728                                      unsigned X86Opcode, bool Mode,
7729                                      bool isUndefLO, bool isUndefHI) {
7730   MVT VT = V0.getSimpleValueType();
7731   assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
7732          "Invalid nodes in input!");
7733 
7734   unsigned NumElts = VT.getVectorNumElements();
7735   SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
7736   SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
7737   SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
7738   SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
7739   MVT NewVT = V0_LO.getSimpleValueType();
7740 
7741   SDValue LO = DAG.getUNDEF(NewVT);
7742   SDValue HI = DAG.getUNDEF(NewVT);
7743 
7744   if (Mode) {
7745     // Don't emit a horizontal binop if the result is expected to be UNDEF.
7746     if (!isUndefLO && !V0->isUndef())
7747       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
7748     if (!isUndefHI && !V1->isUndef())
7749       HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
7750   } else {
7751     // Don't emit a horizontal binop if the result is expected to be UNDEF.
7752     if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
7753       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
7754 
7755     if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
7756       HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
7757   }
7758 
7759   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
7760 }
7761 
7762 /// Returns true iff \p BV builds a vector with the result equivalent to
7763 /// the result of ADDSUB/SUBADD operation.
7764 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
7765 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
7766 /// \p Opnd0 and \p Opnd1.
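/// For example (an illustrative sketch), a v4f32 build_vector of
///   (fsub (extract_elt %A, 0), (extract_elt %B, 0)),
///   (fadd (extract_elt %A, 1), (extract_elt %B, 1)),
///   (fsub (extract_elt %A, 2), (extract_elt %B, 2)),
///   (fadd (extract_elt %A, 3), (extract_elt %B, 3))
/// matches ADDSUB with \p Opnd0 = %A, \p Opnd1 = %B and \p IsSubAdd = false
/// (%A and %B are placeholder names).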
7767 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
7768                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
7769                              SDValue &Opnd0, SDValue &Opnd1,
7770                              unsigned &NumExtracts,
7771                              bool &IsSubAdd) {
7772 
7773   MVT VT = BV->getSimpleValueType(0);
7774   if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
7775     return false;
7776 
7777   unsigned NumElts = VT.getVectorNumElements();
7778   SDValue InVec0 = DAG.getUNDEF(VT);
7779   SDValue InVec1 = DAG.getUNDEF(VT);
7780 
7781   NumExtracts = 0;
7782 
7783   // Odd-numbered elements in the input build vector are obtained from
7784   // adding/subtracting two float elements (add for ADDSUB, sub for SUBADD).
7785   // Even-numbered elements in the input build vector are obtained from
7786   // subtracting/adding two float elements (sub for ADDSUB, add for SUBADD).
7787   unsigned Opc[2] = {0, 0};
7788   for (unsigned i = 0, e = NumElts; i != e; ++i) {
7789     SDValue Op = BV->getOperand(i);
7790 
7791     // Skip 'undef' values.
7792     unsigned Opcode = Op.getOpcode();
7793     if (Opcode == ISD::UNDEF)
7794       continue;
7795 
7796     // Early exit if we found an unexpected opcode.
7797     if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
7798       return false;
7799 
7800     SDValue Op0 = Op.getOperand(0);
7801     SDValue Op1 = Op.getOperand(1);
7802 
7803     // Try to match the following pattern:
7804     // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
7805     // Early exit if we cannot match that sequence.
7806     if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7807         Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7808         !isa<ConstantSDNode>(Op0.getOperand(1)) ||
7809         Op0.getOperand(1) != Op1.getOperand(1))
7810       return false;
7811 
7812     unsigned I0 = Op0.getConstantOperandVal(1);
7813     if (I0 != i)
7814       return false;
7815 
7816     // We found a valid add/sub node; make sure it's the same opcode as previous
7817     // elements of this parity.
7818     if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
7819       return false;
7820     Opc[i % 2] = Opcode;
7821 
7822     // Update InVec0 and InVec1.
7823     if (InVec0.isUndef()) {
7824       InVec0 = Op0.getOperand(0);
7825       if (InVec0.getSimpleValueType() != VT)
7826         return false;
7827     }
7828     if (InVec1.isUndef()) {
7829       InVec1 = Op1.getOperand(0);
7830       if (InVec1.getSimpleValueType() != VT)
7831         return false;
7832     }
7833 
7834     // Make sure that the input operands to each add/sub node always
7835     // come from the same pair of vectors.
7836     if (InVec0 != Op0.getOperand(0)) {
7837       if (Opcode == ISD::FSUB)
7838         return false;
7839 
7840       // FADD is commutable. Try to commute the operands
7841       // and then test again.
7842       std::swap(Op0, Op1);
7843       if (InVec0 != Op0.getOperand(0))
7844         return false;
7845     }
7846 
7847     if (InVec1 != Op1.getOperand(0))
7848       return false;
7849 
7850     // Increment the number of extractions done.
7851     ++NumExtracts;
7852   }
7853 
7854   // Ensure we have found an opcode for both parities and that they are
7855   // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
7856   // inputs are undef.
7857   if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
7858       InVec0.isUndef() || InVec1.isUndef())
7859     return false;
7860 
7861   IsSubAdd = Opc[0] == ISD::FADD;
7862 
7863   Opnd0 = InVec0;
7864   Opnd1 = InVec1;
7865   return true;
7866 }
7867 
7868 /// Returns true if it is possible to fold MUL and an idiom that has already been
7869 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
7870 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
7871 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
7872 ///
7873 /// Prior to calling this function it should be known that there is some
7874 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
7875 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
7876 /// before replacement of such SDNode with ADDSUB operation. Thus the number
7877 /// of \p Opnd0 uses is expected to be equal to 2.
7878 /// For example, this function may be called for the following IR:
7879 ///    %AB = fmul fast <2 x double> %A, %B
7880 ///    %Sub = fsub fast <2 x double> %AB, %C
7881 ///    %Add = fadd fast <2 x double> %AB, %C
7882 ///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
7883 ///                            <2 x i32> <i32 0, i32 3>
7884 /// There is a def for %Addsub here, which potentially can be replaced by
7885 /// X86ISD::ADDSUB operation:
7886 ///    %Addsub = X86ISD::ADDSUB %AB, %C
7887 /// and such ADDSUB can further be replaced with FMADDSUB:
7888 ///    %Addsub = FMADDSUB %A, %B, %C.
7889 ///
7890 /// The main reason why this method is called before the replacement of the
7891 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
7892 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
7893 /// FMADDSUB is.
7894 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
7895                                  SelectionDAG &DAG,
7896                                  SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
7897                                  unsigned ExpectedUses) {
7898   if (Opnd0.getOpcode() != ISD::FMUL ||
7899       !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
7900     return false;
7901 
7902   // FIXME: These checks must match the similar ones in
7903   // DAGCombiner::visitFADDForFMACombine. It would be good to have one
7904   // function that would answer if it is Ok to fuse MUL + ADD to FMADD
7905   // or MUL + ADDSUB to FMADDSUB.
7906   const TargetOptions &Options = DAG.getTarget().Options;
7907   bool AllowFusion =
7908       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
7909   if (!AllowFusion)
7910     return false;
7911 
7912   Opnd2 = Opnd1;
7913   Opnd1 = Opnd0.getOperand(1);
7914   Opnd0 = Opnd0.getOperand(0);
7915 
7916   return true;
7917 }
7918 
7919 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
7920 /// 'fsubadd' operation accordingly to X86ISD::ADDSUB or X86ISD::FMADDSUB or
7921 /// X86ISD::FMSUBADD node.
7922 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
7923                                        const X86Subtarget &Subtarget,
7924                                        SelectionDAG &DAG) {
7925   SDValue Opnd0, Opnd1;
7926   unsigned NumExtracts;
7927   bool IsSubAdd;
7928   if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
7929                         IsSubAdd))
7930     return SDValue();
7931 
7932   MVT VT = BV->getSimpleValueType(0);
7933   SDLoc DL(BV);
7934 
7935   // Try to generate X86ISD::FMADDSUB node here.
7936   SDValue Opnd2;
7937   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
7938     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
7939     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
7940   }
7941 
7942   // We only support ADDSUB.
7943   if (IsSubAdd)
7944     return SDValue();
7945 
7946   // There are no known X86 targets with 512-bit ADDSUB instructions!
7947   // Convert to blend(fsub,fadd).
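  // For example (illustrative), for v8f64 the shuffle mask built below is
  // {0, 9, 2, 11, 4, 13, 6, 15}: even lanes come from the FSUB result and odd
  // lanes from the FADD result.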
7948   if (VT.is512BitVector()) {
7949     SmallVector<int> Mask;
7950     for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
7951         Mask.push_back(I);
7952         Mask.push_back(I + E + 1);
7953     }
7954     SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
7955     SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
7956     return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
7957   }
7958 
7959   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
7960 }
7961 
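// An illustrative example of the lane-wise matching below: for a v8i32 HADD,
// result elements 0-1 must add the adjacent pairs (0,1),(2,3) of V0, elements
// 2-3 the pairs (0,1),(2,3) of V1, elements 4-5 the pairs (4,5),(6,7) of V0,
// and elements 6-7 the pairs (4,5),(6,7) of V1, mirroring how VPHADDD operates
// independently on each 128-bit lane.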
7962 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
7963                              unsigned &HOpcode, SDValue &V0, SDValue &V1) {
7964   // Initialize outputs to known values.
7965   MVT VT = BV->getSimpleValueType(0);
7966   HOpcode = ISD::DELETED_NODE;
7967   V0 = DAG.getUNDEF(VT);
7968   V1 = DAG.getUNDEF(VT);
7969 
7970   // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
7971   // half of the result is calculated independently from the 128-bit halves of
7972   // the inputs, so that makes the index-checking logic below more complicated.
7973   unsigned NumElts = VT.getVectorNumElements();
7974   unsigned GenericOpcode = ISD::DELETED_NODE;
7975   unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
7976   unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
7977   unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
7978   for (unsigned i = 0; i != Num128BitChunks; ++i) {
7979     for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
7980       // Ignore undef elements.
7981       SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
7982       if (Op.isUndef())
7983         continue;
7984 
7985       // If there's an opcode mismatch, we're done.
7986       if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
7987         return false;
7988 
7989       // Initialize horizontal opcode.
7990       if (HOpcode == ISD::DELETED_NODE) {
7991         GenericOpcode = Op.getOpcode();
7992         switch (GenericOpcode) {
7993         case ISD::ADD: HOpcode = X86ISD::HADD; break;
7994         case ISD::SUB: HOpcode = X86ISD::HSUB; break;
7995         case ISD::FADD: HOpcode = X86ISD::FHADD; break;
7996         case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
7997         default: return false;
7998         }
7999       }
8000 
8001       SDValue Op0 = Op.getOperand(0);
8002       SDValue Op1 = Op.getOperand(1);
8003       if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8004           Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8005           Op0.getOperand(0) != Op1.getOperand(0) ||
8006           !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8007           !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8008         return false;
8009 
8010       // The source vector is chosen based on which 64-bit half of the
8011       // destination vector is being calculated.
8012       if (j < NumEltsIn64Bits) {
8013         if (V0.isUndef())
8014           V0 = Op0.getOperand(0);
8015       } else {
8016         if (V1.isUndef())
8017           V1 = Op0.getOperand(0);
8018       }
8019 
8020       SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8021       if (SourceVec != Op0.getOperand(0))
8022         return false;
8023 
8024       // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8025       unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8026       unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8027       unsigned ExpectedIndex = i * NumEltsIn128Bits +
8028                                (j % NumEltsIn64Bits) * 2;
8029       if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8030         continue;
8031 
8032       // If this is not a commutative op, this does not match.
8033       if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8034         return false;
8035 
8036       // Addition is commutative, so try swapping the extract indexes.
8037       // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8038       if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8039         continue;
8040 
8041       // Extract indexes do not match horizontal requirement.
8042       return false;
8043     }
8044   }
8045   // We matched. Opcode and operands are returned by reference as arguments.
8046   return true;
8047 }
8048 
8049 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
8050                                     SelectionDAG &DAG, unsigned HOpcode,
8051                                     SDValue V0, SDValue V1) {
8052   // If either input vector is not the same size as the build vector,
8053   // extract/insert the low bits to the correct size.
8054   // This is free (examples: zmm --> xmm, xmm --> ymm).
8055   MVT VT = BV->getSimpleValueType(0);
8056   unsigned Width = VT.getSizeInBits();
8057   if (V0.getValueSizeInBits() > Width)
8058     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
8059   else if (V0.getValueSizeInBits() < Width)
8060     V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
8061 
8062   if (V1.getValueSizeInBits() > Width)
8063     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
8064   else if (V1.getValueSizeInBits() < Width)
8065     V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
8066 
8067   unsigned NumElts = VT.getVectorNumElements();
8068   APInt DemandedElts = APInt::getAllOnes(NumElts);
8069   for (unsigned i = 0; i != NumElts; ++i)
8070     if (BV->getOperand(i).isUndef())
8071       DemandedElts.clearBit(i);
8072 
8073   // If we don't need the upper xmm, then perform as a xmm hop.
8074   // If we don't need the upper xmm, then perform as an xmm hop.
8075   if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
8076     MVT HalfVT = VT.getHalfNumVectorElementsVT();
8077     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
8078     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
8079     SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
8080     return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
8081   }
8082 
8083   return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
8084 }
8085 
8086 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
8087 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
8088                                    const X86Subtarget &Subtarget,
8089                                    SelectionDAG &DAG) {
8090   // We need at least 2 non-undef elements to make this worthwhile by default.
8091   unsigned NumNonUndefs =
8092       count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
8093   if (NumNonUndefs < 2)
8094     return SDValue();
8095 
8096   // There are 4 sets of horizontal math operations distinguished by type:
8097   // int/FP at 128-bit/256-bit. Each type was introduced with a different
8098   // subtarget feature. Try to match those "native" patterns first.
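  // (For instance, the 128-bit FP forms (haddps/haddpd) require SSE3, the
  // 128-bit integer forms (phaddw/phaddd) require SSSE3, and the 256-bit FP
  // and integer forms require AVX and AVX2 respectively, matching the checks
  // below.)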
8099   MVT VT = BV->getSimpleValueType(0);
8100   if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
8101       ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
8102       ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
8103       ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
8104     unsigned HOpcode;
8105     SDValue V0, V1;
8106     if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
8107       return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
8108   }
8109 
8110   // Try harder to match 256-bit ops by using extract/concat.
8111   if (!Subtarget.hasAVX() || !VT.is256BitVector())
8112     return SDValue();
8113 
8114   // Count the number of UNDEF operands in the build_vector in input.
8115   // Count the number of UNDEF operands in the input build_vector.
8116   unsigned Half = NumElts / 2;
8117   unsigned NumUndefsLO = 0;
8118   unsigned NumUndefsHI = 0;
8119   for (unsigned i = 0, e = Half; i != e; ++i)
8120     if (BV->getOperand(i)->isUndef())
8121       NumUndefsLO++;
8122 
8123   for (unsigned i = Half, e = NumElts; i != e; ++i)
8124     if (BV->getOperand(i)->isUndef())
8125       NumUndefsHI++;
8126 
8127   SDLoc DL(BV);
8128   SDValue InVec0, InVec1;
8129   if (VT == MVT::v8i32 || VT == MVT::v16i16) {
8130     SDValue InVec2, InVec3;
8131     unsigned X86Opcode;
8132     bool CanFold = true;
8133 
8134     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
8135         isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
8136                               InVec3) &&
8137         ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8138         ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8139       X86Opcode = X86ISD::HADD;
8140     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
8141                                    InVec1) &&
8142              isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
8143                                    InVec3) &&
8144              ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8145              ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8146       X86Opcode = X86ISD::HSUB;
8147     else
8148       CanFold = false;
8149 
8150     if (CanFold) {
8151       // Do not try to expand this build_vector into a pair of horizontal
8152       // add/sub if we can emit a pair of scalar add/sub.
8153       if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8154         return SDValue();
8155 
8156       // Convert this build_vector into a pair of horizontal binops followed by
8157       // a concat vector. We must adjust the outputs from the partial horizontal
8158       // matching calls above to account for undefined vector halves.
8159       SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
8160       SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
8161       assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
8162       bool isUndefLO = NumUndefsLO == Half;
8163       bool isUndefHI = NumUndefsHI == Half;
8164       return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
8165                                    isUndefHI);
8166     }
8167   }
8168 
8169   if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
8170       VT == MVT::v16i16) {
8171     unsigned X86Opcode;
8172     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
8173       X86Opcode = X86ISD::HADD;
8174     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
8175                                    InVec1))
8176       X86Opcode = X86ISD::HSUB;
8177     else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
8178                                    InVec1))
8179       X86Opcode = X86ISD::FHADD;
8180     else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
8181                                    InVec1))
8182       X86Opcode = X86ISD::FHSUB;
8183     else
8184       return SDValue();
8185 
8186     // Don't try to expand this build_vector into a pair of horizontal add/sub
8187     // if we can simply emit a pair of scalar add/sub.
8188     if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8189       return SDValue();
8190 
8191     // Convert this build_vector into two horizontal add/sub followed by
8192     // a concat vector.
8193     bool isUndefLO = NumUndefsLO == Half;
8194     bool isUndefHI = NumUndefsHI == Half;
8195     return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
8196                                  isUndefLO, isUndefHI);
8197   }
8198 
8199   return SDValue();
8200 }
8201 
8202 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
8203                           SelectionDAG &DAG);
8204 
8205 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
8206 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
8207 /// just apply the bit to the vectors.
8208 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
8209 /// from this, but enough scalar bit operations are created by the later
8210 /// legalization + scalarization stages to need basic support.
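/// An illustrative sketch: (build_vector (shl %a, C), (shl %b, C), (shl %c, C),
/// (shl %d, C)) with a uniform constant C becomes
/// (shl (build_vector %a, %b, %c, %d), (build_vector C, C, C, C)), which is
/// then lowered directly as a vector shift (%a..%d and C are placeholders).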
8211 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
8212                                        const X86Subtarget &Subtarget,
8213                                        SelectionDAG &DAG) {
8214   SDLoc DL(Op);
8215   MVT VT = Op->getSimpleValueType(0);
8216   unsigned NumElems = VT.getVectorNumElements();
8217   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8218 
8219   // Check that all elements have the same opcode.
8220   // TODO: Should we allow UNDEFS and if so how many?
8221   unsigned Opcode = Op->getOperand(0).getOpcode();
8222   for (unsigned i = 1; i < NumElems; ++i)
8223     if (Opcode != Op->getOperand(i).getOpcode())
8224       return SDValue();
8225 
8226   // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
8227   bool IsShift = false;
8228   switch (Opcode) {
8229   default:
8230     return SDValue();
8231   case ISD::SHL:
8232   case ISD::SRL:
8233   case ISD::SRA:
8234     IsShift = true;
8235     break;
8236   case ISD::AND:
8237   case ISD::XOR:
8238   case ISD::OR:
8239     // Don't do this if the buildvector is a splat - we'd replace one
8240     // constant with an entire vector.
8241     if (Op->getSplatValue())
8242       return SDValue();
8243     if (!TLI.isOperationLegalOrPromote(Opcode, VT))
8244       return SDValue();
8245     break;
8246   }
8247 
8248   SmallVector<SDValue, 4> LHSElts, RHSElts;
8249   for (SDValue Elt : Op->ops()) {
8250     SDValue LHS = Elt.getOperand(0);
8251     SDValue RHS = Elt.getOperand(1);
8252 
8253     // We expect the canonicalized RHS operand to be the constant.
8254     if (!isa<ConstantSDNode>(RHS))
8255       return SDValue();
8256 
8257     // Extend shift amounts.
8258     if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
8259       if (!IsShift)
8260         return SDValue();
8261       RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
8262     }
8263 
8264     LHSElts.push_back(LHS);
8265     RHSElts.push_back(RHS);
8266   }
8267 
8268   // Limit to shifts by uniform immediates.
8269   // TODO: Only accept vXi8/vXi64 special cases?
8270   // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
8271   if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
8272     return SDValue();
8273 
8274   SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
8275   SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
8276   SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
8277 
8278   if (!IsShift)
8279     return Res;
8280 
8281   // Immediately lower the shift to ensure the constant build vector doesn't
8282   // get converted to a constant pool before the shift is lowered.
8283   return LowerShift(Res, Subtarget, DAG);
8284 }
8285 
8286 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
8287 /// functionality to do this, so it's all zeros, all ones, or some derivation
8288 /// that is cheap to calculate.
8289 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
8290                                          const X86Subtarget &Subtarget) {
8291   SDLoc DL(Op);
8292   MVT VT = Op.getSimpleValueType();
8293 
8294   // Vectors containing all zeros can be matched by pxor and xorps.
8295   if (ISD::isBuildVectorAllZeros(Op.getNode()))
8296     return Op;
8297 
8298   // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
8299   // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
8300   // vpcmpeqd on 256-bit vectors.
8301   if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
8302     if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
8303       return Op;
8304 
8305     return getOnesVector(VT, DAG, DL);
8306   }
8307 
8308   return SDValue();
8309 }
8310 
8311 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
8312 /// from a vector of source values and a vector of extraction indices.
8313 /// The vectors might be manipulated to match the type of the permute op.
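/// An illustrative sketch: a v4i32 variable permute on an SSSE3-only target is
/// lowered as a PSHUFB of the source bitcast to v16i8, with each i32 index
/// scaled to four byte indices (4*idx+0 .. 4*idx+3).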
8314 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
8315                                      SDLoc &DL, SelectionDAG &DAG,
8316                                      const X86Subtarget &Subtarget) {
8317   MVT ShuffleVT = VT;
8318   EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8319   unsigned NumElts = VT.getVectorNumElements();
8320   unsigned SizeInBits = VT.getSizeInBits();
8321 
8322   // Adjust IndicesVec to match VT size.
8323   assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
8324          "Illegal variable permute mask size");
8325   if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
8326     // Narrow/widen the indices vector to the correct size.
8327     if (IndicesVec.getValueSizeInBits() > SizeInBits)
8328       IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
8329                                     NumElts * VT.getScalarSizeInBits());
8330     else if (IndicesVec.getValueSizeInBits() < SizeInBits)
8331       IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
8332                                   SDLoc(IndicesVec), SizeInBits);
8333     // Zero-extend the index elements within the vector.
8334     if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
8335       IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
8336                                IndicesVT, IndicesVec);
8337   }
8338   IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
8339 
8340   // Handle SrcVec that don't match VT type.
8341   // Handle a SrcVec whose size doesn't match the VT size.
8342     if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
8343       // Handle larger SrcVec by treating it as a larger permute.
8344       unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
8345       VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
8346       IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8347       IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
8348                                   Subtarget, DAG, SDLoc(IndicesVec));
8349       SDValue NewSrcVec =
8350           createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8351       if (NewSrcVec)
8352         return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
8353       return SDValue();
8354     } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
8355       // Widen smaller SrcVec to match VT.
8356       SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
8357     } else
8358       return SDValue();
8359   }
8360 
8361   auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
8362     assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
8363     EVT SrcVT = Idx.getValueType();
8364     unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
8365     uint64_t IndexScale = 0;
8366     uint64_t IndexOffset = 0;
8367 
8368     // If we're scaling a smaller permute op, then we need to repeat the
8369     // indices, scaling and offsetting them as well.
8370     // e.g. v4i32 -> v16i8 (Scale = 4)
8371     // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
8372     // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
8373     for (uint64_t i = 0; i != Scale; ++i) {
8374       IndexScale |= Scale << (i * NumDstBits);
8375       IndexOffset |= i << (i * NumDstBits);
8376     }
8377 
8378     Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
8379                       DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
8380     Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
8381                       DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
8382     return Idx;
8383   };
8384 
8385   unsigned Opcode = 0;
8386   switch (VT.SimpleTy) {
8387   default:
8388     break;
8389   case MVT::v16i8:
8390     if (Subtarget.hasSSSE3())
8391       Opcode = X86ISD::PSHUFB;
8392     break;
8393   case MVT::v8i16:
8394     if (Subtarget.hasVLX() && Subtarget.hasBWI())
8395       Opcode = X86ISD::VPERMV;
8396     else if (Subtarget.hasSSSE3()) {
8397       Opcode = X86ISD::PSHUFB;
8398       ShuffleVT = MVT::v16i8;
8399     }
8400     break;
8401   case MVT::v4f32:
8402   case MVT::v4i32:
8403     if (Subtarget.hasAVX()) {
8404       Opcode = X86ISD::VPERMILPV;
8405       ShuffleVT = MVT::v4f32;
8406     } else if (Subtarget.hasSSSE3()) {
8407       Opcode = X86ISD::PSHUFB;
8408       ShuffleVT = MVT::v16i8;
8409     }
8410     break;
8411   case MVT::v2f64:
8412   case MVT::v2i64:
8413     if (Subtarget.hasAVX()) {
8414       // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
8415       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8416       Opcode = X86ISD::VPERMILPV;
8417       ShuffleVT = MVT::v2f64;
8418     } else if (Subtarget.hasSSE41()) {
8419       // SSE41 can compare v2i64 - select between indices 0 and 1.
8420       return DAG.getSelectCC(
8421           DL, IndicesVec,
8422           getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
8423           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
8424           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
8425           ISD::CondCode::SETEQ);
8426     }
8427     break;
8428   case MVT::v32i8:
8429     if (Subtarget.hasVLX() && Subtarget.hasVBMI())
8430       Opcode = X86ISD::VPERMV;
8431     else if (Subtarget.hasXOP()) {
8432       SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
8433       SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
8434       SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
8435       SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
8436       return DAG.getNode(
8437           ISD::CONCAT_VECTORS, DL, VT,
8438           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
8439           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
8440     } else if (Subtarget.hasAVX()) {
8441       SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
8442       SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
8443       SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
8444       SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
8445       auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
8446                               ArrayRef<SDValue> Ops) {
8447         // Permute Lo and Hi and then select based on index range.
8448         // This works as PSHUFB uses bits[3:0] to permute elements and we don't
8449         // care about bit[7] as it's just an index vector.
8450         SDValue Idx = Ops[2];
8451         EVT VT = Idx.getValueType();
8452         return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
8453                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
8454                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
8455                                ISD::CondCode::SETGT);
8456       };
8457       SDValue Ops[] = {LoLo, HiHi, IndicesVec};
8458       return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
8459                               PSHUFBBuilder);
8460     }
8461     break;
8462   case MVT::v16i16:
8463     if (Subtarget.hasVLX() && Subtarget.hasBWI())
8464       Opcode = X86ISD::VPERMV;
8465     else if (Subtarget.hasAVX()) {
8466       // Scale to v32i8 and perform as v32i8.
8467       IndicesVec = ScaleIndices(IndicesVec, 2);
8468       return DAG.getBitcast(
8469           VT, createVariablePermute(
8470                   MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
8471                   DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
8472     }
8473     break;
8474   case MVT::v8f32:
8475   case MVT::v8i32:
8476     if (Subtarget.hasAVX2())
8477       Opcode = X86ISD::VPERMV;
8478     else if (Subtarget.hasAVX()) {
8479       SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
8480       SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8481                                           {0, 1, 2, 3, 0, 1, 2, 3});
8482       SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8483                                           {4, 5, 6, 7, 4, 5, 6, 7});
8484       if (Subtarget.hasXOP())
8485         return DAG.getBitcast(
8486             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
8487                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8488       // Permute Lo and Hi and then select based on index range.
8489       // This works as VPERMILPS only uses index bits[0:1] to permute elements.
8490       SDValue Res = DAG.getSelectCC(
8491           DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
8492           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
8493           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
8494           ISD::CondCode::SETGT);
8495       return DAG.getBitcast(VT, Res);
8496     }
8497     break;
8498   case MVT::v4i64:
8499   case MVT::v4f64:
8500     if (Subtarget.hasAVX512()) {
8501       if (!Subtarget.hasVLX()) {
8502         MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
8503         SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
8504                                 SDLoc(SrcVec));
8505         IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
8506                                     DAG, SDLoc(IndicesVec));
8507         SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
8508                                             DAG, Subtarget);
8509         return extract256BitVector(Res, 0, DAG, DL);
8510       }
8511       Opcode = X86ISD::VPERMV;
8512     } else if (Subtarget.hasAVX()) {
8513       SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
8514       SDValue LoLo =
8515           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
8516       SDValue HiHi =
8517           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
8518       // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
8519       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8520       if (Subtarget.hasXOP())
8521         return DAG.getBitcast(
8522             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
8523                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8524       // Permute Lo and Hi and then select based on index range.
8525       // This works as VPERMILPD only uses index bit[1] to permute elements.
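           // The indices were doubled above, so original indices 2 and 3 are now
           // 4 and 6; comparing against 2 (SETGT) selects the HiHi result for
           // exactly those elements.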
8526       SDValue Res = DAG.getSelectCC(
8527           DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
8528           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
8529           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
8530           ISD::CondCode::SETGT);
8531       return DAG.getBitcast(VT, Res);
8532     }
8533     break;
8534   case MVT::v64i8:
8535     if (Subtarget.hasVBMI())
8536       Opcode = X86ISD::VPERMV;
8537     break;
8538   case MVT::v32i16:
8539     if (Subtarget.hasBWI())
8540       Opcode = X86ISD::VPERMV;
8541     break;
8542   case MVT::v16f32:
8543   case MVT::v16i32:
8544   case MVT::v8f64:
8545   case MVT::v8i64:
8546     if (Subtarget.hasAVX512())
8547       Opcode = X86ISD::VPERMV;
8548     break;
8549   }
8550   if (!Opcode)
8551     return SDValue();
8552 
8553   assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
8554          (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
8555          "Illegal variable permute shuffle type");
8556 
8557   uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
8558   if (Scale > 1)
8559     IndicesVec = ScaleIndices(IndicesVec, Scale);
8560 
8561   EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
8562   IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
8563 
8564   SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
8565   SDValue Res = Opcode == X86ISD::VPERMV
8566                     ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
8567                     : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
8568   return DAG.getBitcast(VT, Res);
8569 }
8570 
8571 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
8572 // recognized as a permutation of a vector by indices in a non-constant vector.
8573 // (build_vector (extract_elt V, (extract_elt I, 0)),
8574 //               (extract_elt V, (extract_elt I, 1)),
8575 //                    ...
8576 // ->
8577 // (vpermv I, V)
8578 //
8579 // TODO: Handle undefs
8580 // TODO: Utilize pshufb and zero mask blending to support more efficient
8581 // construction of vectors with constant-0 elements.
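     //
     // The match requires every operand to extract from the same SrcVec, with
     // the i-th operand's index taken from element i of the same IndicesVec
     // (possibly via a zero/sign extend).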
8582 static SDValue
8583 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
8584                                    const X86Subtarget &Subtarget) {
8585   SDValue SrcVec, IndicesVec;
8586   // Check for a match of the permute source vector and permute index elements.
8587   // This is done by checking that the i-th build_vector operand is of the form:
8588   // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
8589   for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
8590     SDValue Op = V.getOperand(Idx);
8591     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8592       return SDValue();
8593 
8594     // If this is the first extract encountered in V, set the source vector,
8595     // otherwise verify the extract is from the previously defined source
8596     // vector.
8597     if (!SrcVec)
8598       SrcVec = Op.getOperand(0);
8599     else if (SrcVec != Op.getOperand(0))
8600       return SDValue();
8601     SDValue ExtractedIndex = Op->getOperand(1);
8602     // Peek through extends.
8603     if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
8604         ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
8605       ExtractedIndex = ExtractedIndex.getOperand(0);
8606     if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8607       return SDValue();
8608 
8609     // If this is the first extract from the index vector candidate, set the
8610     // indices vector, otherwise verify the extract is from the previously
8611     // defined indices vector.
8612     if (!IndicesVec)
8613       IndicesVec = ExtractedIndex.getOperand(0);
8614     else if (IndicesVec != ExtractedIndex.getOperand(0))
8615       return SDValue();
8616 
8617     auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
8618     if (!PermIdx || PermIdx->getAPIntValue() != Idx)
8619       return SDValue();
8620   }
8621 
8622   SDLoc DL(V);
8623   MVT VT = V.getSimpleValueType();
8624   return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8625 }
8626 
8627 SDValue
8628 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
8629   SDLoc dl(Op);
8630 
8631   MVT VT = Op.getSimpleValueType();
8632   MVT EltVT = VT.getVectorElementType();
8633   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
8634   unsigned NumElems = Op.getNumOperands();
8635 
8636   // Generate vectors for predicate vectors.
8637   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
8638     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
8639 
8640   if (VT.getVectorElementType() == MVT::bf16 &&
8641       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16()))
8642     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
8643 
8644   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
8645     return VectorConstant;
8646 
8647   unsigned EVTBits = EltVT.getSizeInBits();
8648   APInt UndefMask = APInt::getZero(NumElems);
8649   APInt FrozenUndefMask = APInt::getZero(NumElems);
8650   APInt ZeroMask = APInt::getZero(NumElems);
8651   APInt NonZeroMask = APInt::getZero(NumElems);
8652   bool IsAllConstants = true;
8653   bool OneUseFrozenUndefs = true;
8654   SmallSet<SDValue, 8> Values;
8655   unsigned NumConstants = NumElems;
8656   for (unsigned i = 0; i < NumElems; ++i) {
8657     SDValue Elt = Op.getOperand(i);
8658     if (Elt.isUndef()) {
8659       UndefMask.setBit(i);
8660       continue;
8661     }
8662     if (ISD::isFreezeUndef(Elt.getNode())) {
8663       OneUseFrozenUndefs = OneUseFrozenUndefs && Elt->hasOneUse();
8664       FrozenUndefMask.setBit(i);
8665       continue;
8666     }
8667     Values.insert(Elt);
8668     if (!isIntOrFPConstant(Elt)) {
8669       IsAllConstants = false;
8670       NumConstants--;
8671     }
8672     if (X86::isZeroNode(Elt)) {
8673       ZeroMask.setBit(i);
8674     } else {
8675       NonZeroMask.setBit(i);
8676     }
8677   }
8678 
8679   // All undef vector. Return an UNDEF.
8680   if (UndefMask.isAllOnes())
8681     return DAG.getUNDEF(VT);
8682 
8683   // All undef/freeze(undef) vector. Return a FREEZE UNDEF.
8684   if (OneUseFrozenUndefs && (UndefMask | FrozenUndefMask).isAllOnes())
8685     return DAG.getFreeze(DAG.getUNDEF(VT));
8686 
8687   // All undef/freeze(undef)/zero vector. Return a zero vector.
8688   if ((UndefMask | FrozenUndefMask | ZeroMask).isAllOnes())
8689     return getZeroVector(VT, Subtarget, DAG, dl);
8690 
8691   // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
8692   // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
8693   // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
8694   // and blend the FREEZE-UNDEF operands back in.
8695   // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
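       // The blend mask selects element i from the thawed build_vector, or
       // element i + NumElems from the freeze(undef) splat when operand i was
       // freeze(undef); plain undef operands stay undef (-1) in the mask.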
8696   if (unsigned NumFrozenUndefElts = FrozenUndefMask.popcount();
8697       NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
8698     SmallVector<int, 16> BlendMask(NumElems, -1);
8699     SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
8700     for (unsigned i = 0; i < NumElems; ++i) {
8701       if (UndefMask[i]) {
8702         BlendMask[i] = -1;
8703         continue;
8704       }
8705       BlendMask[i] = i;
8706       if (!FrozenUndefMask[i])
8707         Elts[i] = Op.getOperand(i);
8708       else
8709         BlendMask[i] += NumElems;
8710     }
8711     SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
8712     SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
8713     SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
8714     return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
8715   }
8716 
8717   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
8718 
8719   // If the upper elts of a ymm/zmm are undef/freeze(undef)/zero then we might
8720   // be better off lowering to a smaller build vector and padding with
8721   // undef/zero.
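       // e.g. a v8i32 whose upper 4 operands are all undef/zero is built as a
       // v4i32 build_vector and widened back to v8i32, padding with zero unless
       // the upper operands were all plain undef.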
8722   if ((VT.is256BitVector() || VT.is512BitVector()) &&
8723       !isFoldableUseOfShuffle(BV)) {
8724     unsigned UpperElems = NumElems / 2;
8725     APInt UndefOrZeroMask = FrozenUndefMask | UndefMask | ZeroMask;
8726     unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countl_one();
8727     if (NumUpperUndefsOrZeros >= UpperElems) {
8728       if (VT.is512BitVector() &&
8729           NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
8730         UpperElems = NumElems - (NumElems / 4);
8731       // If freeze(undef) is in any upper elements, force to zero.
8732       bool UndefUpper = UndefMask.countl_one() >= UpperElems;
8733       MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
8734       SDValue NewBV =
8735           DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
8736       return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
8737     }
8738   }
8739 
8740   if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
8741     return AddSub;
8742   if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
8743     return HorizontalOp;
8744   if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
8745     return Broadcast;
8746   if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
8747     return BitOp;
8748 
8749   unsigned NumZero = ZeroMask.popcount();
8750   unsigned NumNonZero = NonZeroMask.popcount();
8751 
8752   // If we are inserting one variable into a vector of non-zero constants, try
8753   // to avoid loading each constant element as a scalar. Load the constants as a
8754   // vector and then insert the variable scalar element. If insertion is not
8755   // supported, fall back to a shuffle to get the scalar blended with the
8756   // constants. Insertion into a zero vector is handled as a special case
8757   // later in this function.
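       // e.g. <C0, C1, x, C3> becomes a constant-pool load of <C0, C1, undef, C3>
       // followed by an insert (or shuffle) of the variable scalar x at index 2.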
8758   if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
8759       FrozenUndefMask.isZero() &&
8760       (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
8761        isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
8762     // Create an all-constant vector. The variable element in the old
8763     // build vector is replaced by undef in the constant vector. Save the
8764     // variable scalar element and its index for use in the insertelement.
8765     LLVMContext &Context = *DAG.getContext();
8766     Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
8767     SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
8768     SDValue VarElt;
8769     SDValue InsIndex;
8770     for (unsigned i = 0; i != NumElems; ++i) {
8771       SDValue Elt = Op.getOperand(i);
8772       if (auto *C = dyn_cast<ConstantSDNode>(Elt))
8773         ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
8774       else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
8775         ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
8776       else if (!Elt.isUndef()) {
8777         assert(!VarElt.getNode() && !InsIndex.getNode() &&
8778                "Expected one variable element in this vector");
8779         VarElt = Elt;
8780         InsIndex = DAG.getVectorIdxConstant(i, dl);
8781       }
8782     }
8783     Constant *CV = ConstantVector::get(ConstVecOps);
8784     SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
8785 
8786     // The constants we just created may not be legal (e.g., floating point). We
8787     // must lower the vector right here because we cannot guarantee that we'll
8788     // legalize it before loading it. This is also why we could not just create
8789     // a new build vector here. If the build vector contains illegal constants,
8790     // it could get split back up into a series of insert elements.
8791     // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
8792     SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
8793     MachineFunction &MF = DAG.getMachineFunction();
8794     MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
8795     SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
8796     unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
8797     unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
8798     if (InsertC < NumEltsInLow128Bits)
8799       return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
8800 
8801     // There's no good way to insert into the high elements of a >128-bit
8802     // vector, so use shuffles to avoid an extract/insert sequence.
8803     assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
8804     assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
8805     SmallVector<int, 8> ShuffleMask;
8806     unsigned NumElts = VT.getVectorNumElements();
8807     for (unsigned i = 0; i != NumElts; ++i)
8808       ShuffleMask.push_back(i == InsertC ? NumElts : i);
8809     SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
8810     return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
8811   }
8812 
8813   // Special case for single non-zero, non-undef, element.
8814   if (NumNonZero == 1) {
8815     unsigned Idx = NonZeroMask.countr_zero();
8816     SDValue Item = Op.getOperand(Idx);
8817 
8818     // If we have a constant or non-constant insertion into the low element of
8819     // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
8820     // the rest of the elements.  This will be matched as movd/movq/movss/movsd
8821     // depending on what the source datatype is.
8822     if (Idx == 0) {
8823       if (NumZero == 0)
8824         return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8825 
8826       if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
8827           EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
8828           (EltVT == MVT::i16 && Subtarget.hasFP16())) {
8829         assert((VT.is128BitVector() || VT.is256BitVector() ||
8830                 VT.is512BitVector()) &&
8831                "Expected an SSE value type!");
8832         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8833         // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
8834         // zero vector.
8835         return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8836       }
8837 
8838       // We can't directly insert an i8 or i16 into a vector, so zero extend
8839       // it to i32 first.
8840       if (EltVT == MVT::i16 || EltVT == MVT::i8) {
8841         Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
8842         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
8843         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
8844         Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8845         return DAG.getBitcast(VT, Item);
8846       }
8847     }
8848 
8849     // Is it a vector logical left shift?
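         // build_vector <0, x> is equivalent to shifting <x, undef> left by half
         // the vector width (lowered as a whole-vector byte shift, e.g. PSLLDQ).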
8850     if (NumElems == 2 && Idx == 1 &&
8851         X86::isZeroNode(Op.getOperand(0)) &&
8852         !X86::isZeroNode(Op.getOperand(1))) {
8853       unsigned NumBits = VT.getSizeInBits();
8854       return getVShift(true, VT,
8855                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
8856                                    VT, Op.getOperand(1)),
8857                        NumBits/2, DAG, *this, dl);
8858     }
8859 
8860     if (IsAllConstants) // Otherwise, it's better to do a constpool load.
8861       return SDValue();
8862 
8863     // Otherwise, if this is a vector with i32 or f32 elements, and the element
8864     // is a non-constant being inserted into an element other than the low one,
8865     // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
8866     // movd/movss) to move this into the low element, then shuffle it into
8867     // place.
8868     if (EVTBits == 32) {
8869       Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8870       return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
8871     }
8872   }
8873 
8874   // Splat is obviously ok. Let legalizer expand it to a shuffle.
8875   if (Values.size() == 1) {
8876     if (EVTBits == 32) {
8877       // Instead of a shuffle like this:
8878       // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
8879       // check if it's possible to issue this instead:
8880       // shuffle (vload ptr), undef, <1, 1, 1, 1>
8881       unsigned Idx = NonZeroMask.countr_zero();
8882       SDValue Item = Op.getOperand(Idx);
8883       if (Op.getNode()->isOnlyUserOf(Item.getNode()))
8884         return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
8885     }
8886     return SDValue();
8887   }
8888 
8889   // A vector full of immediates; various special cases are already
8890   // handled, so this is best done with a single constant-pool load.
8891   if (IsAllConstants)
8892     return SDValue();
8893 
8894   if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
8895     return V;
8896 
8897   // See if we can use a vector load to get all of the elements.
8898   {
8899     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
8900     if (SDValue LD =
8901             EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
8902       return LD;
8903   }
8904 
8905   // If this is a splat of pairs of 32-bit elements, we can use a narrower
8906   // build_vector and broadcast it.
8907   // TODO: We could probably generalize this more.
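       // e.g. v8f32 <a,b,a,b,a,b,a,b> is built as v4f32 <a,b,undef,undef>,
       // bitcast to v2f64, broadcast to v4f64 and bitcast back to v8f32.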
8908   if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
8909     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8910                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8911     auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
8912       // Make sure all the even/odd operands match.
8913       for (unsigned i = 2; i != NumElems; ++i)
8914         if (Ops[i % 2] != Op.getOperand(i))
8915           return false;
8916       return true;
8917     };
8918     if (CanSplat(Op, NumElems, Ops)) {
8919       MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
8920       MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
8921       // Create a new build vector and cast to v2i64/v2f64.
8922       SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
8923                                      DAG.getBuildVector(NarrowVT, dl, Ops));
8924       // Broadcast from v2i64/v2f64 and cast to final VT.
8925       MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
8926       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
8927                                             NewBV));
8928     }
8929   }
8930 
8931   // For AVX-length vectors, build the individual 128-bit pieces and use
8932   // shuffles to put them in place.
8933   if (VT.getSizeInBits() > 128) {
8934     MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
8935 
8936     // Build both the lower and upper subvector.
8937     SDValue Lower =
8938         DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
8939     SDValue Upper = DAG.getBuildVector(
8940         HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
8941 
8942     // Recreate the wider vector with the lower and upper part.
8943     return concatSubVectors(Lower, Upper, DAG, dl);
8944   }
8945 
8946   // Let legalizer expand 2-wide build_vectors.
8947   if (EVTBits == 64) {
8948     if (NumNonZero == 1) {
8949       // One half is zero or undef.
8950       unsigned Idx = NonZeroMask.countr_zero();
8951       SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
8952                                Op.getOperand(Idx));
8953       return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
8954     }
8955     return SDValue();
8956   }
8957 
8958   // If element VT is < 32 bits, convert it to inserts into a zero vector.
8959   if (EVTBits == 8 && NumElems == 16)
8960     if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
8961                                           DAG, Subtarget))
8962       return V;
8963 
8964   if (EltVT == MVT::i16 && NumElems == 8)
8965     if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
8966                                           DAG, Subtarget))
8967       return V;
8968 
8969   // If the element VT is 32 bits and there are 4 elements, try to use INSERTPS.
8970   if (EVTBits == 32 && NumElems == 4)
8971     if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
8972       return V;
8973 
8974   // If element VT is == 32 bits, turn it into a number of shuffles.
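       // Each element becomes either a zero vector or a SCALAR_TO_VECTOR; pairs
       // are combined with MOVL/UNPCKL and the two halves are merged with a
       // final 4-element shuffle.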
8975   if (NumElems == 4 && NumZero > 0) {
8976     SmallVector<SDValue, 8> Ops(NumElems);
8977     for (unsigned i = 0; i < 4; ++i) {
8978       bool isZero = !NonZeroMask[i];
8979       if (isZero)
8980         Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
8981       else
8982         Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
8983     }
8984 
8985     for (unsigned i = 0; i < 2; ++i) {
8986       switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
8987         default: llvm_unreachable("Unexpected NonZero count");
8988         case 0:
8989           Ops[i] = Ops[i*2];  // Must be a zero vector.
8990           break;
8991         case 1:
8992           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
8993           break;
8994         case 2:
8995           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
8996           break;
8997         case 3:
8998           Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
8999           break;
9000       }
9001     }
9002 
9003     bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
9004     bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
9005     int MaskVec[] = {
9006       Reverse1 ? 1 : 0,
9007       Reverse1 ? 0 : 1,
9008       static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9009       static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
9010     };
9011     return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9012   }
9013 
9014   assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9015 
9016   // Check for a build vector from mostly shuffle plus few inserting.
9017   if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9018     return Sh;
9019 
9020   // For SSE 4.1, start with the low element and use insertps to add the rest.
9021   if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
9022     SDValue Result;
9023     if (!Op.getOperand(0).isUndef())
9024       Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9025     else
9026       Result = DAG.getUNDEF(VT);
9027 
9028     for (unsigned i = 1; i < NumElems; ++i) {
9029       if (Op.getOperand(i).isUndef()) continue;
9030       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9031                            Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9032     }
9033     return Result;
9034   }
9035 
9036   // Otherwise, expand into a number of unpckl*. Start by extending each of
9037   // our (non-undef) elements to the full vector width with the element in the
9038   // bottom slot of the vector (which generates no code for SSE).
9039   SmallVector<SDValue, 8> Ops(NumElems);
9040   for (unsigned i = 0; i < NumElems; ++i) {
9041     if (!Op.getOperand(i).isUndef())
9042       Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9043     else
9044       Ops[i] = DAG.getUNDEF(VT);
9045   }
9046 
9047   // Next, we iteratively mix elements, e.g. for v4f32:
9048   //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9049   //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9050   //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
9051   for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9052     // Generate scaled UNPCKL shuffle mask.
9053     SmallVector<int, 16> Mask;
9054     for (unsigned i = 0; i != Scale; ++i)
9055       Mask.push_back(i);
9056     for (unsigned i = 0; i != Scale; ++i)
9057       Mask.push_back(NumElems+i);
9058     Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9059 
9060     for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9061       Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9062   }
9063   return Ops[0];
9064 }
9065 
9066 // 256-bit AVX can use the vinsertf128 instruction
9067 // to create 256-bit vectors from two other 128-bit ones.
9068 // TODO: Detect subvector broadcast here instead of DAG combine?
9069 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9070                                       const X86Subtarget &Subtarget) {
9071   SDLoc dl(Op);
9072   MVT ResVT = Op.getSimpleValueType();
9073 
9074   assert((ResVT.is256BitVector() ||
9075           ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9076 
9077   unsigned NumOperands = Op.getNumOperands();
9078   unsigned NumFreezeUndef = 0;
9079   unsigned NumZero = 0;
9080   unsigned NumNonZero = 0;
9081   unsigned NonZeros = 0;
9082   for (unsigned i = 0; i != NumOperands; ++i) {
9083     SDValue SubVec = Op.getOperand(i);
9084     if (SubVec.isUndef())
9085       continue;
9086     if (ISD::isFreezeUndef(SubVec.getNode())) {
9087       // If the freeze(undef) has multiple uses then we must fold to zero.
9088       if (SubVec.hasOneUse())
9089         ++NumFreezeUndef;
9090       else
9091         ++NumZero;
9092     }
9093     else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9094       ++NumZero;
9095     else {
9096       assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9097       NonZeros |= 1 << i;
9098       ++NumNonZero;
9099     }
9100   }
9101 
9102   // If we have more than 2 non-zeros, build each half separately.
9103   if (NumNonZero > 2) {
9104     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9105     ArrayRef<SDUse> Ops = Op->ops();
9106     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9107                              Ops.slice(0, NumOperands/2));
9108     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9109                              Ops.slice(NumOperands/2));
9110     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9111   }
9112 
9113   // Otherwise, build it up through insert_subvectors.
9114   SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9115                         : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
9116                                           : DAG.getUNDEF(ResVT));
9117 
9118   MVT SubVT = Op.getOperand(0).getSimpleValueType();
9119   unsigned NumSubElems = SubVT.getVectorNumElements();
9120   for (unsigned i = 0; i != NumOperands; ++i) {
9121     if ((NonZeros & (1 << i)) == 0)
9122       continue;
9123 
9124     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9125                       Op.getOperand(i),
9126                       DAG.getIntPtrConstant(i * NumSubElems, dl));
9127   }
9128 
9129   return Vec;
9130 }
9131 
9132 // Lower a CONCAT_VECTORS of vXi1 (mask register) operands. Depending on the
9133 // zero/undef layout of the inputs this emits a single KSHIFTL, one or more
9134 // INSERT_SUBVECTORs, or leaves the node as-is where KUNPCK makes it legal.
9135 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
9136 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9137                                        const X86Subtarget &Subtarget,
9138                                        SelectionDAG & DAG) {
9139   SDLoc dl(Op);
9140   MVT ResVT = Op.getSimpleValueType();
9141   unsigned NumOperands = Op.getNumOperands();
9142 
9143   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9144          "Unexpected number of operands in CONCAT_VECTORS");
9145 
9146   uint64_t Zeros = 0;
9147   uint64_t NonZeros = 0;
9148   for (unsigned i = 0; i != NumOperands; ++i) {
9149     SDValue SubVec = Op.getOperand(i);
9150     if (SubVec.isUndef())
9151       continue;
9152     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9153     if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9154       Zeros |= (uint64_t)1 << i;
9155     else
9156       NonZeros |= (uint64_t)1 << i;
9157   }
9158 
9159   unsigned NumElems = ResVT.getVectorNumElements();
9160 
9161   // If we are inserting a non-zero vector and there are zeros in the LSBs and
9162   // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
9163   // insert_subvector will give us two kshifts.
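       // e.g. concat (v4i1 zero, v4i1 X, v4i1 undef, v4i1 undef): widen X to the
       // shift type, KSHIFTL it by 4 bit positions and extract the v16i1 result.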
9164   if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
9165       Log2_64(NonZeros) != NumOperands - 1) {
9166     unsigned Idx = Log2_64(NonZeros);
9167     SDValue SubVec = Op.getOperand(Idx);
9168     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9169     MVT ShiftVT = widenMaskVectorType(ResVT, Subtarget);
9170     Op = widenSubVector(ShiftVT, SubVec, false, Subtarget, DAG, dl);
9171     Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, Op,
9172                      DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
9173     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
9174                        DAG.getIntPtrConstant(0, dl));
9175   }
9176 
9177   // If there are zero or one non-zeros we can handle this very simply.
9178   if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
9179     SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
9180     if (!NonZeros)
9181       return Vec;
9182     unsigned Idx = Log2_64(NonZeros);
9183     SDValue SubVec = Op.getOperand(Idx);
9184     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9185     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
9186                        DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
9187   }
9188 
9189   if (NumOperands > 2) {
9190     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9191     ArrayRef<SDUse> Ops = Op->ops();
9192     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9193                              Ops.slice(0, NumOperands/2));
9194     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9195                              Ops.slice(NumOperands/2));
9196     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9197   }
9198 
9199   assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
9200 
9201   if (ResVT.getVectorNumElements() >= 16)
9202     return Op; // The operation is legal with KUNPCK
9203 
9204   SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
9205                             DAG.getUNDEF(ResVT), Op.getOperand(0),
9206                             DAG.getIntPtrConstant(0, dl));
9207   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
9208                      DAG.getIntPtrConstant(NumElems/2, dl));
9209 }
9210 
9211 static SDValue LowerCONCAT_VECTORS(SDValue Op,
9212                                    const X86Subtarget &Subtarget,
9213                                    SelectionDAG &DAG) {
9214   MVT VT = Op.getSimpleValueType();
9215   if (VT.getVectorElementType() == MVT::i1)
9216     return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
9217 
9218   assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
9219          (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
9220           Op.getNumOperands() == 4)));
9221 
9222   // AVX can use the vinsertf128 instruction to create 256-bit vectors
9223   // from two other 128-bit ones.
9224 
9225   // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
9226   return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
9227 }
9228 
9229 //===----------------------------------------------------------------------===//
9230 // Vector shuffle lowering
9231 //
9232 // This is an experimental code path for lowering vector shuffles on x86. It is
9233 // designed to handle arbitrary vector shuffles and blends, gracefully
9234 // degrading performance as necessary. It works hard to recognize idiomatic
9235 // shuffles and lower them to optimal instruction patterns while staying within
9236 // a framework that allows reasonably efficient handling of all vector shuffle
9237 // patterns.
9238 //===----------------------------------------------------------------------===//
9239 
9240 /// Tiny helper function to identify a no-op mask.
9241 ///
9242 /// This is a somewhat boring predicate function. It checks whether the mask
9243 /// array input, which is assumed to be a single-input shuffle mask of the kind
9244 /// used by the X86 shuffle instructions (not a fully general
9245 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
9246 /// in-place shuffle are 'no-op's.
9247 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
9248   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9249     assert(Mask[i] >= -1 && "Out of bound mask element!");
9250     if (Mask[i] >= 0 && Mask[i] != i)
9251       return false;
9252   }
9253   return true;
9254 }
9255 
9256 /// Test whether there are elements crossing LaneSizeInBits lanes in this
9257 /// shuffle mask.
9258 ///
9259 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9260 /// and we routinely test for these.
9261 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
9262                                       unsigned ScalarSizeInBits,
9263                                       ArrayRef<int> Mask) {
9264   assert(LaneSizeInBits && ScalarSizeInBits &&
9265          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9266          "Illegal shuffle lane size");
9267   int LaneSize = LaneSizeInBits / ScalarSizeInBits;
9268   int Size = Mask.size();
9269   for (int i = 0; i < Size; ++i)
9270     if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
9271       return true;
9272   return false;
9273 }
9274 
9275 /// Test whether there are elements crossing 128-bit lanes in this
9276 /// shuffle mask.
9277 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
9278   return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
9279 }
9280 
9281 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
9282 /// from multiple lanes - this is different from isLaneCrossingShuffleMask to
9283 /// better support 'repeated mask + lane permute' style shuffles.
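     ///
     /// e.g. for v8f32 with 128-bit lanes, <4,5,6,7,0,1,2,3> crosses lanes but
     /// each destination lane reads from a single source lane, while
     /// <0,4,1,5,2,6,3,7> mixes two source lanes within each destination lane.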
9284 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
9285                                    unsigned ScalarSizeInBits,
9286                                    ArrayRef<int> Mask) {
9287   assert(LaneSizeInBits && ScalarSizeInBits &&
9288          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9289          "Illegal shuffle lane size");
9290   int NumElts = Mask.size();
9291   int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
9292   int NumLanes = NumElts / NumEltsPerLane;
9293   if (NumLanes > 1) {
9294     for (int i = 0; i != NumLanes; ++i) {
9295       int SrcLane = -1;
9296       for (int j = 0; j != NumEltsPerLane; ++j) {
9297         int M = Mask[(i * NumEltsPerLane) + j];
9298         if (M < 0)
9299           continue;
9300         int Lane = (M % NumElts) / NumEltsPerLane;
9301         if (SrcLane >= 0 && SrcLane != Lane)
9302           return true;
9303         SrcLane = Lane;
9304       }
9305     }
9306   }
9307   return false;
9308 }
9309 
9310 /// Test whether a shuffle mask is equivalent within each sub-lane.
9311 ///
9312 /// This checks a shuffle mask to see if it is performing the same
9313 /// lane-relative shuffle in each sub-lane. This trivially implies
9314 /// that it is also not lane-crossing. It may however involve a blend from the
9315 /// same lane of a second vector.
9316 ///
9317 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
9318 /// non-trivial to compute in the face of undef lanes. The representation is
9319 /// suitable for use with existing 128-bit shuffles as entries from the second
9320 /// vector have been remapped to [LaneSize, 2*LaneSize).
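     ///
     /// e.g. for v8f32 with 128-bit lanes, the mask <0,9,2,11,4,13,6,15> repeats
     /// in both lanes and produces RepeatedMask <0,5,2,7>, with second-vector
     /// elements remapped into [4,8).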
9321 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
9322                                   ArrayRef<int> Mask,
9323                                   SmallVectorImpl<int> &RepeatedMask) {
9324   auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9325   RepeatedMask.assign(LaneSize, -1);
9326   int Size = Mask.size();
9327   for (int i = 0; i < Size; ++i) {
9328     assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
9329     if (Mask[i] < 0)
9330       continue;
9331     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9332       // This entry crosses lanes, so there is no way to model this shuffle.
9333       return false;
9334 
9335     // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9336     // Adjust second vector indices to start at LaneSize instead of Size.
9337     int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
9338                                 : Mask[i] % LaneSize + LaneSize;
9339     if (RepeatedMask[i % LaneSize] < 0)
9340       // This is the first non-undef entry in this slot of a 128-bit lane.
9341       RepeatedMask[i % LaneSize] = LocalM;
9342     else if (RepeatedMask[i % LaneSize] != LocalM)
9343       // Found a mismatch with the repeated mask.
9344       return false;
9345   }
9346   return true;
9347 }
9348 
9349 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
9350 static bool
9351 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9352                                 SmallVectorImpl<int> &RepeatedMask) {
9353   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9354 }
9355 
9356 static bool
9357 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
9358   SmallVector<int, 32> RepeatedMask;
9359   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9360 }
9361 
9362 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
9363 static bool
9364 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9365                                 SmallVectorImpl<int> &RepeatedMask) {
9366   return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
9367 }
9368 
9369 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9370 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9371 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
9372                                         unsigned EltSizeInBits,
9373                                         ArrayRef<int> Mask,
9374                                         SmallVectorImpl<int> &RepeatedMask) {
9375   int LaneSize = LaneSizeInBits / EltSizeInBits;
9376   RepeatedMask.assign(LaneSize, SM_SentinelUndef);
9377   int Size = Mask.size();
9378   for (int i = 0; i < Size; ++i) {
9379     assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
9380     if (Mask[i] == SM_SentinelUndef)
9381       continue;
9382     if (Mask[i] == SM_SentinelZero) {
9383       if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
9384         return false;
9385       RepeatedMask[i % LaneSize] = SM_SentinelZero;
9386       continue;
9387     }
9388     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9389       // This entry crosses lanes, so there is no way to model this shuffle.
9390       return false;
9391 
9392     // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
9393     // later vector indices to start at multiples of LaneSize instead of Size.
9394     int LaneM = Mask[i] / Size;
9395     int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
9396     if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
9397       // This is the first non-undef entry in this slot of a 128-bit lane.
9398       RepeatedMask[i % LaneSize] = LocalM;
9399     else if (RepeatedMask[i % LaneSize] != LocalM)
9400       // Found a mismatch with the repeated mask.
9401       return false;
9402   }
9403   return true;
9404 }
9405 
9406 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9407 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9408 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
9409                                         ArrayRef<int> Mask,
9410                                         SmallVectorImpl<int> &RepeatedMask) {
9411   return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
9412                                      Mask, RepeatedMask);
9413 }
9414 
9415 /// Checks whether the vector elements referenced by two shuffle masks are
9416 /// equivalent.
9417 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
9418                                 int Idx, int ExpectedIdx) {
9419   assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
9420          ExpectedIdx < MaskSize && "Out of range element index");
9421   if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
9422     return false;
9423 
9424   switch (Op.getOpcode()) {
9425   case ISD::BUILD_VECTOR:
9426     // If the values are build vectors, we can look through them to find
9427     // equivalent inputs that make the shuffles equivalent.
9428     // TODO: Handle MaskSize != Op.getNumOperands()?
9429     if (MaskSize == (int)Op.getNumOperands() &&
9430         MaskSize == (int)ExpectedOp.getNumOperands())
9431       return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
9432     break;
9433   case X86ISD::VBROADCAST:
9434   case X86ISD::VBROADCAST_LOAD:
9435     // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
9436     return (Op == ExpectedOp &&
9437             (int)Op.getValueType().getVectorNumElements() == MaskSize);
9438   case X86ISD::HADD:
9439   case X86ISD::HSUB:
9440   case X86ISD::FHADD:
9441   case X86ISD::FHSUB:
9442   case X86ISD::PACKSS:
9443   case X86ISD::PACKUS:
9444     // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
9445     // TODO: Handle MaskSize != NumElts?
9446     // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
9447     if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
9448       MVT VT = Op.getSimpleValueType();
9449       int NumElts = VT.getVectorNumElements();
9450       if (MaskSize == NumElts) {
9451         int NumLanes = VT.getSizeInBits() / 128;
9452         int NumEltsPerLane = NumElts / NumLanes;
9453         int NumHalfEltsPerLane = NumEltsPerLane / 2;
9454         bool SameLane =
9455             (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
9456         bool SameElt =
9457             (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
9458         return SameLane && SameElt;
9459       }
9460     }
9461     break;
9462   }
9463 
9464   return false;
9465 }
9466 
9467 /// Checks whether a shuffle mask is equivalent to an explicit list of
9468 /// arguments.
9469 ///
9470 /// This is a fast way to test a shuffle mask against a fixed pattern:
9471 ///
9472 ///   if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
9473 ///
9474 /// It returns true if the mask is exactly as wide as the argument list, and
9475 /// each element of the mask is either -1 (signifying undef) or the value given
9476 /// in the argument.
9477 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
9478                                 SDValue V1 = SDValue(),
9479                                 SDValue V2 = SDValue()) {
9480   int Size = Mask.size();
9481   if (Size != (int)ExpectedMask.size())
9482     return false;
9483 
9484   for (int i = 0; i < Size; ++i) {
9485     assert(Mask[i] >= -1 && "Out of bound mask element!");
9486     int MaskIdx = Mask[i];
9487     int ExpectedIdx = ExpectedMask[i];
9488     if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
9489       SDValue MaskV = MaskIdx < Size ? V1 : V2;
9490       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9491       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9492       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9493       if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9494         return false;
9495     }
9496   }
9497   return true;
9498 }
9499 
9500 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
9501 ///
9502 /// The masks must be exactly the same width.
9503 ///
9504 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
9505 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
9506 ///
9507 /// SM_SentinelZero is accepted as a valid negative index but must match in
9508 /// both, or via a known bits test.
9509 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
9510                                       ArrayRef<int> ExpectedMask,
9511                                       const SelectionDAG &DAG,
9512                                       SDValue V1 = SDValue(),
9513                                       SDValue V2 = SDValue()) {
9514   int Size = Mask.size();
9515   if (Size != (int)ExpectedMask.size())
9516     return false;
9517   assert(llvm::all_of(ExpectedMask,
9518                       [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
9519          "Illegal target shuffle mask");
9520 
9521   // Check for out-of-range target shuffle mask indices.
9522   if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
9523     return false;
9524 
9525   // Don't use V1/V2 if they're not the same size as the shuffle mask type.
9526   if (V1 && (V1.getValueSizeInBits() != VT.getSizeInBits() ||
9527              !V1.getValueType().isVector()))
9528     V1 = SDValue();
9529   if (V2 && (V2.getValueSizeInBits() != VT.getSizeInBits() ||
9530              !V2.getValueType().isVector()))
9531     V2 = SDValue();
9532 
9533   APInt ZeroV1 = APInt::getZero(Size);
9534   APInt ZeroV2 = APInt::getZero(Size);
9535 
9536   for (int i = 0; i < Size; ++i) {
9537     int MaskIdx = Mask[i];
9538     int ExpectedIdx = ExpectedMask[i];
9539     if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
9540       continue;
9541     if (MaskIdx == SM_SentinelZero) {
9542       // If we need this expected index to be a zero element, then update the
9543       // relevant zero mask and perform the known bits at the end to minimize
9544       // repeated computes.
9545       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9546       if (ExpectedV &&
9547           Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
9548         int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9549         APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
9550         ZeroMask.setBit(BitIdx);
9551         continue;
9552       }
9553     }
9554     if (MaskIdx >= 0) {
9555       SDValue MaskV = MaskIdx < Size ? V1 : V2;
9556       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9557       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9558       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9559       if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9560         continue;
9561     }
9562     return false;
9563   }
9564   return (ZeroV1.isZero() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
9565          (ZeroV2.isZero() || DAG.MaskedVectorIsZero(V2, ZeroV2));
9566 }
9567 
9568 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
9569 // instructions.
9570 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
9571                                   const SelectionDAG &DAG) {
9572   if (VT != MVT::v8i32 && VT != MVT::v8f32)
9573     return false;
9574 
9575   SmallVector<int, 8> Unpcklwd;
9576   createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
9577                           /* Unary = */ false);
9578   SmallVector<int, 8> Unpckhwd;
9579   createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
9580                           /* Unary = */ false);
9581   bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
9582                          isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
9583   return IsUnpackwdMask;
9584 }
9585 
9586 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
9587                                       const SelectionDAG &DAG) {
9588   // Create 128-bit vector type based on mask size.
9589   MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
9590   MVT VT = MVT::getVectorVT(EltVT, Mask.size());
9591 
9592   // We can't assume a canonical shuffle mask, so try the commuted version too.
9593   SmallVector<int, 4> CommutedMask(Mask);
9594   ShuffleVectorSDNode::commuteMask(CommutedMask);
9595 
9596   // Match any of unary/binary or low/high.
9597   for (unsigned i = 0; i != 4; ++i) {
9598     SmallVector<int, 16> UnpackMask;
9599     createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
9600     if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
9601         isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
9602       return true;
9603   }
9604   return false;
9605 }
9606 
9607 /// Return true if a shuffle mask chooses elements identically in its top and
9608 /// bottom halves. For example, any splat mask has the same top and bottom
9609 /// halves. If an element is undefined in only one half of the mask, the halves
9610 /// are not considered identical.
9611 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
9612   assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
9613   unsigned HalfSize = Mask.size() / 2;
9614   for (unsigned i = 0; i != HalfSize; ++i) {
9615     if (Mask[i] != Mask[i + HalfSize])
9616       return false;
9617   }
9618   return true;
9619 }
9620 
9621 /// Get a 4-lane 8-bit shuffle immediate for a mask.
9622 ///
9623 /// This helper function produces an 8-bit shuffle immediate corresponding to
9624 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
9625 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
9626 /// example.
9627 ///
9628 /// NB: We rely heavily on "undef" masks preserving the input lane.
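     ///
     /// e.g. Mask <1,0,3,2> encodes as 0b10110001 (0xB1): element i of the mask
     /// is stored in bits [2*i+1:2*i] of the immediate.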
9629 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
9630   assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
9631   assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
9632   assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
9633   assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
9634   assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
9635 
9636   // If the mask only uses one non-undef element, then fully 'splat' it to
9637   // improve later broadcast matching.
9638   int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
9639   assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
9640 
9641   int FirstElt = Mask[FirstIndex];
9642   if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
9643     return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
9644 
9645   unsigned Imm = 0;
9646   Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
9647   Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
9648   Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
9649   Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
9650   return Imm;
9651 }
9652 
9653 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
9654                                           SelectionDAG &DAG) {
9655   return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
9656 }
9657 
9658 // The shuffle result has the form:
9659 //   0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in
9660 //   ascending order. Each element of Zeroable corresponds to a particular
9661 //   element of Mask, as described in computeZeroableShuffleElements.
9662 //
9663 // The function looks for a sub-mask whose non-zero elements appear in
9664 // increasing order; if such a sub-mask exists, the function returns true.
9665 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
9666                                      ArrayRef<int> Mask, const EVT &VectorType,
9667                                      bool &IsZeroSideLeft) {
9668   int NextElement = -1;
9669   // Check if the Mask's nonzero elements are in increasing order.
9670   for (int i = 0, e = Mask.size(); i < e; i++) {
9671     // Checks if the mask's zeros elements are built from only zeros.
9672     assert(Mask[i] >= -1 && "Out of bound mask element!");
9673     if (Mask[i] < 0)
9674       return false;
9675     if (Zeroable[i])
9676       continue;
9677     // Find the lowest non zero element
9678     if (NextElement < 0) {
9679       NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
9680       IsZeroSideLeft = NextElement != 0;
9681     }
9682     // Exit if the mask's non zero elements are not in increasing order.
9683     if (NextElement != Mask[i])
9684       return false;
9685     NextElement++;
9686   }
9687   return true;
9688 }
9689 
9690 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
9691 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
9692                                       ArrayRef<int> Mask, SDValue V1,
9693                                       SDValue V2, const APInt &Zeroable,
9694                                       const X86Subtarget &Subtarget,
9695                                       SelectionDAG &DAG) {
9696   int Size = Mask.size();
9697   int LaneSize = 128 / VT.getScalarSizeInBits();
9698   const int NumBytes = VT.getSizeInBits() / 8;
9699   const int NumEltBytes = VT.getScalarSizeInBits() / 8;
9700 
9701   assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
9702          (Subtarget.hasAVX2() && VT.is256BitVector()) ||
9703          (Subtarget.hasBWI() && VT.is512BitVector()));
9704 
9705   SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
9706   // Sign bit set in i8 mask means zero element.
9707   SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
9708 
9709   SDValue V;
9710   for (int i = 0; i < NumBytes; ++i) {
9711     int M = Mask[i / NumEltBytes];
9712     if (M < 0) {
9713       PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
9714       continue;
9715     }
9716     if (Zeroable[i / NumEltBytes]) {
9717       PSHUFBMask[i] = ZeroMask;
9718       continue;
9719     }
9720 
9721     // We can only use a single input of V1 or V2.
9722     SDValue SrcV = (M >= Size ? V2 : V1);
9723     if (V && V != SrcV)
9724       return SDValue();
9725     V = SrcV;
9726     M %= Size;
9727 
9728     // PSHUFB can't cross lanes, ensure this doesn't happen.
9729     if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
9730       return SDValue();
9731 
9732     M = M % LaneSize;
9733     M = M * NumEltBytes + (i % NumEltBytes);
9734     PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
9735   }
9736   assert(V && "Failed to find a source input");
9737 
9738   MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
9739   return DAG.getBitcast(
9740       VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
9741                       DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
9742 }
9743 
9744 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
9745                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
9746                            const SDLoc &dl);
9747 
9748 // X86 has a dedicated shuffle that can be lowered to VEXPAND.
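     // e.g. a v8f32 shuffle <0,Z,1,Z,2,Z,3,Z> (Z = known zero) becomes a VEXPAND
     // of V1 with mask 0b01010101, with the zeroed positions taken from a zero
     // vector.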
9749 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
9750                                     const APInt &Zeroable,
9751                                     ArrayRef<int> Mask, SDValue &V1,
9752                                     SDValue &V2, SelectionDAG &DAG,
9753                                     const X86Subtarget &Subtarget) {
9754   bool IsLeftZeroSide = true;
9755   if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
9756                                 IsLeftZeroSide))
9757     return SDValue();
9758   unsigned VEXPANDMask = (~Zeroable).getZExtValue();
9759   MVT IntegerType =
9760       MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
9761   SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
9762   unsigned NumElts = VT.getVectorNumElements();
9763   assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
9764          "Unexpected number of vector elements");
9765   SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
9766                               Subtarget, DAG, DL);
9767   SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
9768   SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
9769   return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
9770 }
9771 
9772 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
9773                                   unsigned &UnpackOpcode, bool IsUnary,
9774                                   ArrayRef<int> TargetMask, const SDLoc &DL,
9775                                   SelectionDAG &DAG,
9776                                   const X86Subtarget &Subtarget) {
9777   int NumElts = VT.getVectorNumElements();
9778 
9779   bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
9780   for (int i = 0; i != NumElts; i += 2) {
9781     int M1 = TargetMask[i + 0];
9782     int M2 = TargetMask[i + 1];
9783     Undef1 &= (SM_SentinelUndef == M1);
9784     Undef2 &= (SM_SentinelUndef == M2);
9785     Zero1 &= isUndefOrZero(M1);
9786     Zero2 &= isUndefOrZero(M2);
9787   }
9788   assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
9789          "Zeroable shuffle detected");
9790 
9791   // Attempt to match the target mask against the unpack lo/hi mask patterns.
9792   SmallVector<int, 64> Unpckl, Unpckh;
9793   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
9794   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
9795                                 (IsUnary ? V1 : V2))) {
9796     UnpackOpcode = X86ISD::UNPCKL;
9797     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9798     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9799     return true;
9800   }
9801 
9802   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
9803   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
9804                                 (IsUnary ? V1 : V2))) {
9805     UnpackOpcode = X86ISD::UNPCKH;
9806     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9807     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9808     return true;
9809   }
9810 
9811   // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
9812   if (IsUnary && (Zero1 || Zero2)) {
9813     // Don't bother if we can blend instead.
9814     if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
9815         isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
9816       return false;
9817 
9818     bool MatchLo = true, MatchHi = true;
9819     for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
9820       int M = TargetMask[i];
9821 
9822       // Ignore if the input is known to be zero or the index is undef.
9823       if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
9824           (M == SM_SentinelUndef))
9825         continue;
9826 
9827       MatchLo &= (M == Unpckl[i]);
9828       MatchHi &= (M == Unpckh[i]);
9829     }
9830 
9831     if (MatchLo || MatchHi) {
9832       UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
9833       V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9834       V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9835       return true;
9836     }
9837   }
9838 
9839   // If a binary shuffle, commute and try again.
9840   if (!IsUnary) {
9841     ShuffleVectorSDNode::commuteMask(Unpckl);
9842     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
9843       UnpackOpcode = X86ISD::UNPCKL;
9844       std::swap(V1, V2);
9845       return true;
9846     }
9847 
9848     ShuffleVectorSDNode::commuteMask(Unpckh);
9849     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
9850       UnpackOpcode = X86ISD::UNPCKH;
9851       std::swap(V1, V2);
9852       return true;
9853     }
9854   }
9855 
9856   return false;
9857 }
9858 
9859 // X86 has dedicated unpack instructions that can handle specific blend
9860 // operations: UNPCKH and UNPCKL.
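// For example, for v4i32 the UNPCKL mask is <0,4,1,5> and the UNPCKH mask is
// <2,6,3,7>, interleaving the low/high halves of the two inputs.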
9861 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
9862                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
9863                                      SelectionDAG &DAG) {
9864   SmallVector<int, 8> Unpckl;
9865   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
9866   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9867     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
9868 
9869   SmallVector<int, 8> Unpckh;
9870   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
9871   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9872     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
9873 
9874   // Commute and try again.
9875   ShuffleVectorSDNode::commuteMask(Unpckl);
9876   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9877     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
9878 
9879   ShuffleVectorSDNode::commuteMask(Unpckh);
9880   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9881     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
9882 
9883   return SDValue();
9884 }
9885 
9886 /// Check if the mask can be mapped to a preliminary shuffle of 64-bit elements
9887 /// (vperm) followed by a 256-bit unpack.
9888 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
9889                                         ArrayRef<int> Mask, SDValue V1,
9890                                         SDValue V2, SelectionDAG &DAG) {
9891   SmallVector<int, 32> Unpckl, Unpckh;
9892   createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
9893   createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
9894 
9895   unsigned UnpackOpcode;
9896   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9897     UnpackOpcode = X86ISD::UNPCKL;
9898   else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9899     UnpackOpcode = X86ISD::UNPCKH;
9900   else
9901     return SDValue();
9902 
9903   // This is a "natural" unpack operation (rather than the 128-bit sectored
9904   // operation implemented by AVX). We need to rearrange 64-bit chunks of the
9905   // input in order to use the x86 instruction.
9906   V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
9907                             DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
9908   V1 = DAG.getBitcast(VT, V1);
9909   return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
9910 }
9911 
9912 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
9913 // source into the lower elements and zeroing the upper elements.
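// Illustrative example: a v16i8 mask <0,2,4,6,8,10,12,14,Z,..,Z> (Z = zeroable)
// matches a truncation from a v8i16 source (SrcVT = v8i16, Scale = 2), assuming
// the subtarget supports that source element width.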
9914 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
9915                                  ArrayRef<int> Mask, const APInt &Zeroable,
9916                                  const X86Subtarget &Subtarget) {
9917   if (!VT.is512BitVector() && !Subtarget.hasVLX())
9918     return false;
9919 
9920   unsigned NumElts = Mask.size();
9921   unsigned EltSizeInBits = VT.getScalarSizeInBits();
9922   unsigned MaxScale = 64 / EltSizeInBits;
9923 
9924   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
9925     unsigned SrcEltBits = EltSizeInBits * Scale;
9926     if (SrcEltBits < 32 && !Subtarget.hasBWI())
9927       continue;
9928     unsigned NumSrcElts = NumElts / Scale;
9929     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
9930       continue;
9931     unsigned UpperElts = NumElts - NumSrcElts;
9932     if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
9933       continue;
9934     SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
9935     SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
9936     DstVT = MVT::getIntegerVT(EltSizeInBits);
9937     if ((NumSrcElts * EltSizeInBits) >= 128) {
9938       // ISD::TRUNCATE
9939       DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
9940     } else {
9941       // X86ISD::VTRUNC
9942       DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
9943     }
9944     return true;
9945   }
9946 
9947   return false;
9948 }
9949 
9950 // Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
9951 // element padding to the final DstVT.
9952 static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
9953                                   const X86Subtarget &Subtarget,
9954                                   SelectionDAG &DAG, bool ZeroUppers) {
9955   MVT SrcVT = Src.getSimpleValueType();
9956   MVT DstSVT = DstVT.getScalarType();
9957   unsigned NumDstElts = DstVT.getVectorNumElements();
9958   unsigned NumSrcElts = SrcVT.getVectorNumElements();
9959   unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();
9960 
9961   if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
9962     return SDValue();
9963 
9964   // Perform a direct ISD::TRUNCATE if possible.
9965   if (NumSrcElts == NumDstElts)
9966     return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);
9967 
9968   if (NumSrcElts > NumDstElts) {
9969     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9970     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9971     return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
9972   }
9973 
9974   if ((NumSrcElts * DstEltSizeInBits) >= 128) {
9975     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9976     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9977     return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9978                           DstVT.getSizeInBits());
9979   }
9980 
9981   // Non-VLX targets must truncate from a 512-bit type, so we need to
9982   // widen, truncate and then possibly extract the original subvector.
9983   if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
9984     SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
9985     return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
9986   }
9987 
9988   // Fall back to an X86ISD::VTRUNC, padding if necessary.
9989   MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
9990   SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
9991   if (DstVT != TruncVT)
9992     Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9993                            DstVT.getSizeInBits());
9994   return Trunc;
9995 }
9996 
9997 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
9998 //
9999 // An example is the following:
10000 //
10001 // t0: ch = EntryToken
10002 //           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10003 //         t25: v4i32 = truncate t2
10004 //       t41: v8i16 = bitcast t25
10005 //       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10006 //       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10007 //     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10008 //   t18: v2i64 = bitcast t51
10009 //
10010 // One can just use a single vpmovdw instruction; without avx512vl we need to
10011 // use the zmm variant and extract the lower subvector, padding with zeroes.
10012 // TODO: Merge with lowerShuffleAsVTRUNC.
10013 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
10014                                      SDValue V2, ArrayRef<int> Mask,
10015                                      const APInt &Zeroable,
10016                                      const X86Subtarget &Subtarget,
10017                                      SelectionDAG &DAG) {
10018   assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
10019   if (!Subtarget.hasAVX512())
10020     return SDValue();
10021 
10022   unsigned NumElts = VT.getVectorNumElements();
10023   unsigned EltSizeInBits = VT.getScalarSizeInBits();
10024   unsigned MaxScale = 64 / EltSizeInBits;
10025   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10026     unsigned SrcEltBits = EltSizeInBits * Scale;
10027     unsigned NumSrcElts = NumElts / Scale;
10028     unsigned UpperElts = NumElts - NumSrcElts;
10029     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
10030         !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10031       continue;
10032 
10033     // Attempt to find a matching source truncation, but as a fallback VLX
10034     // cases can use the VPMOV directly.
10035     SDValue Src = peekThroughBitcasts(V1);
10036     if (Src.getOpcode() == ISD::TRUNCATE &&
10037         Src.getScalarValueSizeInBits() == SrcEltBits) {
10038       Src = Src.getOperand(0);
10039     } else if (Subtarget.hasVLX()) {
10040       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10041       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10042       Src = DAG.getBitcast(SrcVT, Src);
10043       // Don't do this if PACKSS/PACKUS could perform it cheaper.
10044       if (Scale == 2 &&
10045           ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
10046            (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
10047         return SDValue();
10048     } else
10049       return SDValue();
10050 
10051     // VPMOVWB is only available with avx512bw.
10052     if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
10053       return SDValue();
10054 
10055     bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
10056     return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10057   }
10058 
10059   return SDValue();
10060 }
10061 
10062 // Attempt to match binary shuffle patterns as a truncate.
10063 static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
10064                                     SDValue V2, ArrayRef<int> Mask,
10065                                     const APInt &Zeroable,
10066                                     const X86Subtarget &Subtarget,
10067                                     SelectionDAG &DAG) {
10068   assert((VT.is128BitVector() || VT.is256BitVector()) &&
10069          "Unexpected VTRUNC type");
10070   if (!Subtarget.hasAVX512())
10071     return SDValue();
10072 
10073   unsigned NumElts = VT.getVectorNumElements();
10074   unsigned EltSizeInBits = VT.getScalarSizeInBits();
10075   unsigned MaxScale = 64 / EltSizeInBits;
10076   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10077     // TODO: Support non-BWI VPMOVWB truncations?
10078     unsigned SrcEltBits = EltSizeInBits * Scale;
10079     if (SrcEltBits < 32 && !Subtarget.hasBWI())
10080       continue;
10081 
10082     // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
10083     // Bail if all of the V2 elements are undef.
10084     unsigned NumHalfSrcElts = NumElts / Scale;
10085     unsigned NumSrcElts = 2 * NumHalfSrcElts;
10086     for (unsigned Offset = 0; Offset != Scale; ++Offset) {
10087       if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
10088           isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
10089         continue;
10090 
10091       // The elements beyond the truncation must be undef/zero.
10092       unsigned UpperElts = NumElts - NumSrcElts;
10093       if (UpperElts > 0 &&
10094           !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10095         continue;
10096       bool UndefUppers =
10097           UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
10098 
10099       // For offset truncations, ensure that the concat is cheap.
10100       if (Offset) {
10101         auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
10102           if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10103               Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
10104             return Lo.getOperand(0) == Hi.getOperand(0);
10105           if (ISD::isNormalLoad(Lo.getNode()) &&
10106               ISD::isNormalLoad(Hi.getNode())) {
10107             auto *LDLo = cast<LoadSDNode>(Lo);
10108             auto *LDHi = cast<LoadSDNode>(Hi);
10109             return DAG.areNonVolatileConsecutiveLoads(
10110                 LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
10111           }
10112           return false;
10113         };
10114         if (!IsCheapConcat(V1, V2))
10115           continue;
10116       }
10117 
10118       // As we're using both sources, we need to concat them together
10119       // and truncate from the double-sized src.
10120       MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
10121       SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);
10122 
10123       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10124       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10125       Src = DAG.getBitcast(SrcVT, Src);
10126 
10127       // Shift the offset'd elements into place for the truncation.
10128       // TODO: Use getTargetVShiftByConstNode.
10129       if (Offset)
10130         Src = DAG.getNode(
10131             X86ISD::VSRLI, DL, SrcVT, Src,
10132             DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));
10133 
10134       return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10135     }
10136   }
10137 
10138   return SDValue();
10139 }
10140 
10141 /// Check whether a compaction lowering can be done by dropping even/odd
10142 /// elements and compute how many times even/odd elements must be dropped.
10143 ///
10144 /// This handles shuffles which take every Nth element where N is a power of
10145 /// two. Example shuffle masks:
10146 ///
10147 /// (even)
10148 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
10149 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
10150 ///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
10151 ///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
10152 ///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
10153 ///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
10154 ///
10155 /// (odd)
10156 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
10157 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
10158 ///
10159 /// Any of these lanes can of course be undef.
10160 ///
10161 /// This routine only supports N <= 3.
10162 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
10163 /// FIXME: Evaluate whether either AVX or AVX-512 has any opportunities here
10164 ///
10165 /// \returns N above, or the number of times even/odd elements must be dropped
10166 /// if there is such a number. Otherwise returns zero.
10167 static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
10168                                       bool IsSingleInput) {
10169   // The modulus for the shuffle vector entries is based on whether this is
10170   // a single input or not.
10171   int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
10172   assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
10173          "We should only be called with masks with a power-of-2 size!");
10174 
10175   uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
10176   int Offset = MatchEven ? 0 : 1;
10177 
10178   // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
10179   // and 2^3 simultaneously. This is because we may have ambiguity with
10180   // partially undef inputs.
10181   bool ViableForN[3] = {true, true, true};
10182 
10183   for (int i = 0, e = Mask.size(); i < e; ++i) {
10184     // Ignore undef lanes; we'll optimistically collapse them to the pattern we
10185     // want.
10186     if (Mask[i] < 0)
10187       continue;
10188 
10189     bool IsAnyViable = false;
10190     for (unsigned j = 0; j != std::size(ViableForN); ++j)
10191       if (ViableForN[j]) {
10192         uint64_t N = j + 1;
10193 
10194         // The shuffle mask must be equal to (i * 2^N) % M.
10195         if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
10196           IsAnyViable = true;
10197         else
10198           ViableForN[j] = false;
10199       }
10200     // Early exit if we exhaust the possible powers of two.
10201     if (!IsAnyViable)
10202       break;
10203   }
10204 
10205   for (unsigned j = 0; j != std::size(ViableForN); ++j)
10206     if (ViableForN[j])
10207       return j + 1;
10208 
10209   // Return 0 as there is no viable power of two.
10210   return 0;
10211 }
10212 
10213 // X86 has dedicated pack instructions that can handle specific truncation
10214 // operations: PACKSS and PACKUS.
10215 // Checks for compaction shuffle masks if MaxStages > 1.
10216 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
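// For instance (illustrative): a v16i8 compaction mask
// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30> can be matched as a single
// PACKUS/PACKSS of two v8i16 sources, provided the bits being packed away are
// known to be zero (PACKUS) or copies of the sign bit (PACKSS).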
10217 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
10218                                  unsigned &PackOpcode, ArrayRef<int> TargetMask,
10219                                  const SelectionDAG &DAG,
10220                                  const X86Subtarget &Subtarget,
10221                                  unsigned MaxStages = 1) {
10222   unsigned NumElts = VT.getVectorNumElements();
10223   unsigned BitSize = VT.getScalarSizeInBits();
10224   assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
10225          "Illegal maximum compaction");
10226 
10227   auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
10228     unsigned NumSrcBits = PackVT.getScalarSizeInBits();
10229     unsigned NumPackedBits = NumSrcBits - BitSize;
10230     N1 = peekThroughBitcasts(N1);
10231     N2 = peekThroughBitcasts(N2);
10232     unsigned NumBits1 = N1.getScalarValueSizeInBits();
10233     unsigned NumBits2 = N2.getScalarValueSizeInBits();
10234     bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
10235     bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
10236     if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
10237         (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
10238       return false;
10239     if (Subtarget.hasSSE41() || BitSize == 8) {
10240       APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
10241       if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
10242           (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
10243         V1 = N1;
10244         V2 = N2;
10245         SrcVT = PackVT;
10246         PackOpcode = X86ISD::PACKUS;
10247         return true;
10248       }
10249     }
10250     bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
10251     bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
10252     if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
10253          DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
10254         (N2.isUndef() || IsZero2 || IsAllOnes2 ||
10255          DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
10256       V1 = N1;
10257       V2 = N2;
10258       SrcVT = PackVT;
10259       PackOpcode = X86ISD::PACKSS;
10260       return true;
10261     }
10262     return false;
10263   };
10264 
10265   // Attempt to match against wider and wider compaction patterns.
10266   for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
10267     MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
10268     MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
10269 
10270     // Try binary shuffle.
10271     SmallVector<int, 32> BinaryMask;
10272     createPackShuffleMask(VT, BinaryMask, false, NumStages);
10273     if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
10274       if (MatchPACK(V1, V2, PackVT))
10275         return true;
10276 
10277     // Try unary shuffle.
10278     SmallVector<int, 32> UnaryMask;
10279     createPackShuffleMask(VT, UnaryMask, true, NumStages);
10280     if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
10281       if (MatchPACK(V1, V1, PackVT))
10282         return true;
10283   }
10284 
10285   return false;
10286 }
10287 
10288 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10289                                     SDValue V1, SDValue V2, SelectionDAG &DAG,
10290                                     const X86Subtarget &Subtarget) {
10291   MVT PackVT;
10292   unsigned PackOpcode;
10293   unsigned SizeBits = VT.getSizeInBits();
10294   unsigned EltBits = VT.getScalarSizeInBits();
10295   unsigned MaxStages = Log2_32(64 / EltBits);
10296   if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10297                             Subtarget, MaxStages))
10298     return SDValue();
10299 
10300   unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
10301   unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
10302 
10303   // Don't lower multi-stage packs on AVX512; truncation is better.
10304   if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
10305     return SDValue();
10306 
10307   // Pack to the largest type possible:
10308   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
10309   unsigned MaxPackBits = 16;
10310   if (CurrentEltBits > 16 &&
10311       (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
10312     MaxPackBits = 32;
10313 
10314   // Repeatedly pack down to the target size.
10315   SDValue Res;
10316   for (unsigned i = 0; i != NumStages; ++i) {
10317     unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
10318     unsigned NumSrcElts = SizeBits / SrcEltBits;
10319     MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10320     MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
10321     MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10322     MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
10323     Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
10324                       DAG.getBitcast(SrcVT, V2));
10325     V1 = V2 = Res;
10326     CurrentEltBits /= 2;
10327   }
10328   assert(Res && Res.getValueType() == VT &&
10329          "Failed to lower compaction shuffle");
10330   return Res;
10331 }
10332 
10333 /// Try to emit a bitmask instruction for a shuffle.
10334 ///
10335 /// This handles cases where we can model a blend exactly as a bitmask due to
10336 /// one of the inputs being zeroable.
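///
/// A simple illustrative case: a v4i32 shuffle producing <V1[0],0,V1[2],0>
/// (elements 1 and 3 known zeroable) lowers to an AND of V1 with the constant
/// mask <-1,0,-1,0>.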
10337 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10338                                      SDValue V2, ArrayRef<int> Mask,
10339                                      const APInt &Zeroable,
10340                                      const X86Subtarget &Subtarget,
10341                                      SelectionDAG &DAG) {
10342   MVT MaskVT = VT;
10343   MVT EltVT = VT.getVectorElementType();
10344   SDValue Zero, AllOnes;
10345   // Use f64 if i64 isn't legal.
10346   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10347     EltVT = MVT::f64;
10348     MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10349   }
10350 
10351   MVT LogicVT = VT;
10352   if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10353     Zero = DAG.getConstantFP(0.0, DL, EltVT);
10354     APFloat AllOnesValue =
10355         APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
10356     AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
10357     LogicVT =
10358         MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10359   } else {
10360     Zero = DAG.getConstant(0, DL, EltVT);
10361     AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10362   }
10363 
10364   SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10365   SDValue V;
10366   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10367     if (Zeroable[i])
10368       continue;
10369     if (Mask[i] % Size != i)
10370       return SDValue(); // Not a blend.
10371     if (!V)
10372       V = Mask[i] < Size ? V1 : V2;
10373     else if (V != (Mask[i] < Size ? V1 : V2))
10374       return SDValue(); // Can only let one input through the mask.
10375 
10376     VMaskOps[i] = AllOnes;
10377   }
10378   if (!V)
10379     return SDValue(); // No non-zeroable elements!
10380 
10381   SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10382   VMask = DAG.getBitcast(LogicVT, VMask);
10383   V = DAG.getBitcast(LogicVT, V);
10384   SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10385   return DAG.getBitcast(VT, And);
10386 }
10387 
10388 /// Try to emit a blend instruction for a shuffle using bit math.
10389 ///
10390 /// This is used as a fallback approach when first class blend instructions are
10391 /// unavailable. Currently it is only suitable for integer vectors, but could
10392 /// be generalized for floating point vectors if desirable.
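///
/// For example (illustrative): a v4i32 mask <0,5,2,7> selects V2 for the odd
/// elements, so the result is effectively (V1 & M) | (V2 & ~M) with the
/// constant mask M = <-1,0,-1,0>.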
10393 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10394                                       SDValue V2, ArrayRef<int> Mask,
10395                                       SelectionDAG &DAG) {
10396   assert(VT.isInteger() && "Only supports integer vector types!");
10397   MVT EltVT = VT.getVectorElementType();
10398   SDValue Zero = DAG.getConstant(0, DL, EltVT);
10399   SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10400   SmallVector<SDValue, 16> MaskOps;
10401   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10402     if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10403       return SDValue(); // Shuffled input!
10404     MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10405   }
10406 
10407   SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10408   return getBitSelect(DL, VT, V1, V2, V1Mask, DAG);
10409 }
10410 
10411 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10412                                     SDValue PreservedSrc,
10413                                     const X86Subtarget &Subtarget,
10414                                     SelectionDAG &DAG);
10415 
10416 static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
10417                                 MutableArrayRef<int> Mask,
10418                                 const APInt &Zeroable, bool &ForceV1Zero,
10419                                 bool &ForceV2Zero, uint64_t &BlendMask) {
10420   bool V1IsZeroOrUndef =
10421       V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10422   bool V2IsZeroOrUndef =
10423       V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10424 
10425   BlendMask = 0;
10426   ForceV1Zero = false, ForceV2Zero = false;
10427   assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
10428 
10429   int NumElts = Mask.size();
10430   int NumLanes = VT.getSizeInBits() / 128;
10431   int NumEltsPerLane = NumElts / NumLanes;
10432   assert((NumLanes * NumEltsPerLane) == NumElts && "Value type mismatch");
10433 
10434   // For 32/64-bit elements, if we only reference one input (plus any undefs),
10435   // then ensure the blend mask part for that lane just references that input.
10436   bool ForceWholeLaneMasks =
10437       VT.is256BitVector() && VT.getScalarSizeInBits() >= 32;
10438 
10439   // Attempt to generate the binary blend mask. If an input is zero then
10440   // we can use any lane.
10441   for (int Lane = 0; Lane != NumLanes; ++Lane) {
10442     // Keep track of the inputs used per lane.
10443     bool LaneV1InUse = false;
10444     bool LaneV2InUse = false;
10445     uint64_t LaneBlendMask = 0;
10446     for (int LaneElt = 0; LaneElt != NumEltsPerLane; ++LaneElt) {
10447       int Elt = (Lane * NumEltsPerLane) + LaneElt;
10448       int M = Mask[Elt];
10449       if (M == SM_SentinelUndef)
10450         continue;
10451       if (M == Elt || (0 <= M && M < NumElts &&
10452                      IsElementEquivalent(NumElts, V1, V1, M, Elt))) {
10453         Mask[Elt] = Elt;
10454         LaneV1InUse = true;
10455         continue;
10456       }
10457       if (M == (Elt + NumElts) ||
10458           (NumElts <= M &&
10459            IsElementEquivalent(NumElts, V2, V2, M - NumElts, Elt))) {
10460         LaneBlendMask |= 1ull << LaneElt;
10461         Mask[Elt] = Elt + NumElts;
10462         LaneV2InUse = true;
10463         continue;
10464       }
10465       if (Zeroable[Elt]) {
10466         if (V1IsZeroOrUndef) {
10467           ForceV1Zero = true;
10468           Mask[Elt] = Elt;
10469           LaneV1InUse = true;
10470           continue;
10471         }
10472         if (V2IsZeroOrUndef) {
10473           ForceV2Zero = true;
10474           LaneBlendMask |= 1ull << LaneElt;
10475           Mask[Elt] = Elt + NumElts;
10476           LaneV2InUse = true;
10477           continue;
10478         }
10479       }
10480       return false;
10481     }
10482 
10483     // If we only used V2 then splat the lane blend mask to avoid any demanded
10484     // elts from V1 in this lane (the V1 equivalent is implicit with a zero
10485     // blend mask bit).
10486     if (ForceWholeLaneMasks && LaneV2InUse && !LaneV1InUse)
10487       LaneBlendMask = (1ull << NumEltsPerLane) - 1;
10488 
10489     BlendMask |= LaneBlendMask << (Lane * NumEltsPerLane);
10490   }
10491   return true;
10492 }
10493 
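// Widen a blend mask by repeating each bit Scale times, e.g. scaling the 4-bit
// mask 0b1010 by Scale = 2 yields the 8-bit mask 0b11001100.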
10494 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
10495                                             int Scale) {
10496   uint64_t ScaledMask = 0;
10497   for (int i = 0; i != Size; ++i)
10498     if (BlendMask & (1ull << i))
10499       ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
10500   return ScaledMask;
10501 }
10502 
10503 /// Try to emit a blend instruction for a shuffle.
10504 ///
10505 /// This doesn't do any checks for the availability of instructions for blending
10506 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
10507 /// be matched in the backend with the type given. What it does check for is
10508 /// that the shuffle mask is a blend, or convertible into a blend with zero.
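///
/// For example: a v4f32 mask <0,5,2,7> takes elements 1 and 3 from V2, giving
/// an immediate blend mask of 0b1010.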
10509 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
10510                                    SDValue V2, ArrayRef<int> Original,
10511                                    const APInt &Zeroable,
10512                                    const X86Subtarget &Subtarget,
10513                                    SelectionDAG &DAG) {
10514   uint64_t BlendMask = 0;
10515   bool ForceV1Zero = false, ForceV2Zero = false;
10516   SmallVector<int, 64> Mask(Original);
10517   if (!matchShuffleAsBlend(VT, V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
10518                            BlendMask))
10519     return SDValue();
10520 
10521   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
10522   if (ForceV1Zero)
10523     V1 = getZeroVector(VT, Subtarget, DAG, DL);
10524   if (ForceV2Zero)
10525     V2 = getZeroVector(VT, Subtarget, DAG, DL);
10526 
10527   unsigned NumElts = VT.getVectorNumElements();
10528 
10529   switch (VT.SimpleTy) {
10530   case MVT::v4i64:
10531   case MVT::v8i32:
10532     assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
10533     [[fallthrough]];
10534   case MVT::v4f64:
10535   case MVT::v8f32:
10536     assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
10537     [[fallthrough]];
10538   case MVT::v2f64:
10539   case MVT::v2i64:
10540   case MVT::v4f32:
10541   case MVT::v4i32:
10542   case MVT::v8i16:
10543     assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
10544     return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
10545                        DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10546   case MVT::v16i16: {
10547     assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
10548     SmallVector<int, 8> RepeatedMask;
10549     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10550       // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
10551       assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
10552       BlendMask = 0;
10553       for (int i = 0; i < 8; ++i)
10554         if (RepeatedMask[i] >= 8)
10555           BlendMask |= 1ull << i;
10556       return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10557                          DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10558     }
10559     // Use PBLENDW for lower/upper lanes and then blend lanes.
10560     // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
10561     // merge to VSELECT where useful.
10562     uint64_t LoMask = BlendMask & 0xFF;
10563     uint64_t HiMask = (BlendMask >> 8) & 0xFF;
10564     if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
10565       SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10566                                DAG.getTargetConstant(LoMask, DL, MVT::i8));
10567       SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10568                                DAG.getTargetConstant(HiMask, DL, MVT::i8));
10569       return DAG.getVectorShuffle(
10570           MVT::v16i16, DL, Lo, Hi,
10571           {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
10572     }
10573     [[fallthrough]];
10574   }
10575   case MVT::v32i8:
10576     assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
10577     [[fallthrough]];
10578   case MVT::v16i8: {
10579     assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
10580 
10581     // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
10582     if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10583                                                Subtarget, DAG))
10584       return Masked;
10585 
10586     if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
10587       MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10588       SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10589       return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10590     }
10591 
10592     // If we have VPTERNLOG, we can use that as a bit blend.
10593     if (Subtarget.hasVLX())
10594       if (SDValue BitBlend =
10595               lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
10596         return BitBlend;
10597 
10598     // Scale the blend by the number of bytes per element.
10599     int Scale = VT.getScalarSizeInBits() / 8;
10600 
10601     // This form of blend is always done on bytes. Compute the byte vector
10602     // type.
10603     MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10604 
10605     // x86 allows load folding with blendvb from the 2nd source operand. But
10606     // we are still using LLVM select here (see comment below), so that's V1.
10607     // If V2 can be load-folded and V1 cannot be load-folded, then commute to
10608     // allow that load-folding possibility.
10609     if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
10610       ShuffleVectorSDNode::commuteMask(Mask);
10611       std::swap(V1, V2);
10612     }
10613 
10614     // Compute the VSELECT mask. Note that VSELECT is really confusing in the
10615     // mix of LLVM's code generator and the x86 backend. We tell the code
10616     // generator that boolean values in the elements of an x86 vector register
10617     // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
10618     // mapping a select to operand #1, and 'false' mapping to operand #2. The
10619     // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
10620     // of the element (the remaining are ignored) and 0 in that high bit would
10621     // mean operand #1 while 1 in the high bit would mean operand #2. So while
10622     // the LLVM model for boolean values in vector elements gets the relevant
10623     // bit set, it is set backwards and over constrained relative to x86's
10624     // actual model.
10625     SmallVector<SDValue, 32> VSELECTMask;
10626     for (int i = 0, Size = Mask.size(); i < Size; ++i)
10627       for (int j = 0; j < Scale; ++j)
10628         VSELECTMask.push_back(
10629             Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
10630                         : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
10631                                           MVT::i8));
10632 
10633     V1 = DAG.getBitcast(BlendVT, V1);
10634     V2 = DAG.getBitcast(BlendVT, V2);
10635     return DAG.getBitcast(
10636         VT,
10637         DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
10638                       V1, V2));
10639   }
10640   case MVT::v16f32:
10641   case MVT::v8f64:
10642   case MVT::v8i64:
10643   case MVT::v16i32:
10644   case MVT::v32i16:
10645   case MVT::v64i8: {
10646     // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
10647     bool OptForSize = DAG.shouldOptForSize();
10648     if (!OptForSize) {
10649       if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10650                                                  Subtarget, DAG))
10651         return Masked;
10652     }
10653 
10654     // Otherwise load an immediate into a GPR, cast to k-register, and use a
10655     // masked move.
10656     MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10657     SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10658     return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10659   }
10660   default:
10661     llvm_unreachable("Not a supported integer vector type!");
10662   }
10663 }
10664 
10665 /// Try to lower as a blend of elements from two inputs followed by
10666 /// a single-input permutation.
10667 ///
10668 /// This matches the pattern where we can blend elements from two inputs and
10669 /// then reduce the shuffle to a single-input permutation.
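///
/// A worked example (illustrative): the v4i32 mask <1,4,3,6> is lowered as a
/// blend of V1/V2 with the shuffle mask <4,1,6,3> followed by the single-input
/// permute <1,0,3,2>.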
10670 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
10671                                              SDValue V1, SDValue V2,
10672                                              ArrayRef<int> Mask,
10673                                              SelectionDAG &DAG,
10674                                              bool ImmBlends = false) {
10675   // We build up the blend mask while checking whether a blend is a viable way
10676   // to reduce the shuffle.
10677   SmallVector<int, 32> BlendMask(Mask.size(), -1);
10678   SmallVector<int, 32> PermuteMask(Mask.size(), -1);
10679 
10680   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10681     if (Mask[i] < 0)
10682       continue;
10683 
10684     assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
10685 
10686     if (BlendMask[Mask[i] % Size] < 0)
10687       BlendMask[Mask[i] % Size] = Mask[i];
10688     else if (BlendMask[Mask[i] % Size] != Mask[i])
10689       return SDValue(); // Can't blend in the needed input!
10690 
10691     PermuteMask[i] = Mask[i] % Size;
10692   }
10693 
10694   // If only immediate blends are allowed, bail if the blend mask can't be
10695   // widened to i16.
10696   unsigned EltSize = VT.getScalarSizeInBits();
10697   if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
10698     return SDValue();
10699 
10700   SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
10701   return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
10702 }
10703 
10704 /// Try to lower as an unpack of elements from two inputs followed by
10705 /// a single-input permutation.
10706 ///
10707 /// This matches the pattern where we can unpack elements from two inputs and
10708 /// then reduce the shuffle to a single-input (wider) permutation.
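///
/// A worked example (illustrative): the v4i32 mask <1,5,0,4> is lowered as
/// UNPCKL(V1, V2), which produces <0,4,1,5>, followed by the single-input
/// permute <2,3,0,1>.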
10709 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
10710                                              SDValue V1, SDValue V2,
10711                                              ArrayRef<int> Mask,
10712                                              SelectionDAG &DAG) {
10713   int NumElts = Mask.size();
10714   int NumLanes = VT.getSizeInBits() / 128;
10715   int NumLaneElts = NumElts / NumLanes;
10716   int NumHalfLaneElts = NumLaneElts / 2;
10717 
10718   bool MatchLo = true, MatchHi = true;
10719   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
10720 
10721   // Determine UNPCKL/UNPCKH type and operand order.
10722   for (int Elt = 0; Elt != NumElts; ++Elt) {
10723     int M = Mask[Elt];
10724     if (M < 0)
10725       continue;
10726 
10727     // Normalize the mask value depending on whether it's V1 or V2.
10728     int NormM = M;
10729     SDValue &Op = Ops[Elt & 1];
10730     if (M < NumElts && (Op.isUndef() || Op == V1))
10731       Op = V1;
10732     else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
10733       Op = V2;
10734       NormM -= NumElts;
10735     } else
10736       return SDValue();
10737 
10738     bool MatchLoAnyLane = false, MatchHiAnyLane = false;
10739     for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10740       int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
10741       MatchLoAnyLane |= isUndefOrInRange(NormM, Lo, Mid);
10742       MatchHiAnyLane |= isUndefOrInRange(NormM, Mid, Hi);
10743       if (MatchLoAnyLane || MatchHiAnyLane) {
10744         assert((MatchLoAnyLane ^ MatchHiAnyLane) &&
10745                "Failed to match UNPCKLO/UNPCKHI");
10746         break;
10747       }
10748     }
10749     MatchLo &= MatchLoAnyLane;
10750     MatchHi &= MatchHiAnyLane;
10751     if (!MatchLo && !MatchHi)
10752       return SDValue();
10753   }
10754   assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
10755 
10756   // Element indices have changed after unpacking. Calculate the permute mask
10757   // so that they will be put back to the positions dictated by the
10758   // original shuffle mask indices.
10759   SmallVector<int, 32> PermuteMask(NumElts, -1);
10760   for (int Elt = 0; Elt != NumElts; ++Elt) {
10761     int M = Mask[Elt];
10762     if (M < 0)
10763       continue;
10764     int NormM = M;
10765     if (NumElts <= M)
10766       NormM -= NumElts;
10767     bool IsFirstOp = M < NumElts;
10768     int BaseMaskElt =
10769         NumLaneElts * (NormM / NumLaneElts) + (2 * (NormM % NumHalfLaneElts));
10770     if ((IsFirstOp && V1 == Ops[0]) || (!IsFirstOp && V2 == Ops[0]))
10771       PermuteMask[Elt] = BaseMaskElt;
10772     else if ((IsFirstOp && V1 == Ops[1]) || (!IsFirstOp && V2 == Ops[1]))
10773       PermuteMask[Elt] = BaseMaskElt + 1;
10774     assert(PermuteMask[Elt] != -1 &&
10775            "Input mask element is defined but failed to assign permute mask");
10776   }
10777 
10778   unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10779   SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
10780   return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
10781 }
10782 
10783 /// Try to lower a shuffle as a permute of the inputs followed by an
10784 /// UNPCK instruction.
10785 ///
10786 /// This specifically targets cases where we end up with alternating between
10787 /// the two inputs, and so can permute them into something that feeds a single
10788 /// UNPCK instruction. Note that this routine only targets integer vectors
10789 /// because for floating point vectors we have a generalized SHUFPS lowering
10790 /// strategy that handles everything that doesn't *exactly* match an unpack,
10791 /// making this clever lowering unnecessary.
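///
/// For example (illustrative): the v4i32 mask <0,2,4,6> can be lowered by
/// permuting each input to <0,2,u,u> (u = undef) and then performing a v2i64
/// UNPCKL of the two permuted results.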
10792 static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
10793                                               SDValue V1, SDValue V2,
10794                                               ArrayRef<int> Mask,
10795                                               const X86Subtarget &Subtarget,
10796                                               SelectionDAG &DAG) {
10797   int Size = Mask.size();
10798   assert(Mask.size() >= 2 && "Single element masks are invalid.");
10799 
10800   // This routine only supports 128-bit integer dual input vectors.
10801   if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
10802     return SDValue();
10803 
10804   int NumLoInputs =
10805       count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
10806   int NumHiInputs =
10807       count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
10808 
10809   bool UnpackLo = NumLoInputs >= NumHiInputs;
10810 
10811   auto TryUnpack = [&](int ScalarSize, int Scale) {
10812     SmallVector<int, 16> V1Mask((unsigned)Size, -1);
10813     SmallVector<int, 16> V2Mask((unsigned)Size, -1);
10814 
10815     for (int i = 0; i < Size; ++i) {
10816       if (Mask[i] < 0)
10817         continue;
10818 
10819       // Each element of the unpack contains Scale elements from this mask.
10820       int UnpackIdx = i / Scale;
10821 
10822       // We only handle the case where V1 feeds the first slots of the unpack.
10823       // We rely on canonicalization to ensure this is the case.
10824       if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
10825         return SDValue();
10826 
10827       // Set up the mask for this input. The indexing is tricky as we have to
10828       // handle the unpack stride.
10829       SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
10830       VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
10831           Mask[i] % Size;
10832     }
10833 
10834     // If we will have to shuffle both inputs to use the unpack, check whether
10835     // we can just unpack first and shuffle the result. If so, skip this unpack.
10836     if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
10837         !isNoopShuffleMask(V2Mask))
10838       return SDValue();
10839 
10840     // Shuffle the inputs into place.
10841     V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
10842     V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
10843 
10844     // Cast the inputs to the type we will use to unpack them.
10845     MVT UnpackVT =
10846         MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
10847     V1 = DAG.getBitcast(UnpackVT, V1);
10848     V2 = DAG.getBitcast(UnpackVT, V2);
10849 
10850     // Unpack the inputs and cast the result back to the desired type.
10851     return DAG.getBitcast(
10852         VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10853                         UnpackVT, V1, V2));
10854   };
10855 
10856   // We try each unpack from the largest to the smallest to find one
10857   // that fits this mask.
10858   int OrigScalarSize = VT.getScalarSizeInBits();
10859   for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
10860     if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
10861       return Unpack;
10862 
10863   // If we're shuffling with a zero vector then we're better off not doing
10864   // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
10865   if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
10866       ISD::isBuildVectorAllZeros(V2.getNode()))
10867     return SDValue();
10868 
10869   // If none of the unpack-rooted lowerings worked (or were profitable) try an
10870   // initial unpack.
10871   if (NumLoInputs == 0 || NumHiInputs == 0) {
10872     assert((NumLoInputs > 0 || NumHiInputs > 0) &&
10873            "We have to have *some* inputs!");
10874     int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
10875 
10876     // FIXME: We could consider the total complexity of the permute of each
10877     // possible unpacking. Or at the least we should consider how many
10878     // half-crossings are created.
10879     // FIXME: We could consider commuting the unpacks.
10880 
10881     SmallVector<int, 32> PermMask((unsigned)Size, -1);
10882     for (int i = 0; i < Size; ++i) {
10883       if (Mask[i] < 0)
10884         continue;
10885 
10886       assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
10887 
10888       PermMask[i] =
10889           2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
10890     }
10891     return DAG.getVectorShuffle(
10892         VT, DL,
10893         DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
10894                     V1, V2),
10895         DAG.getUNDEF(VT), PermMask);
10896   }
10897 
10898   return SDValue();
10899 }
10900 
10901 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
10902 /// permuting the elements of the result in place.
10903 static SDValue lowerShuffleAsByteRotateAndPermute(
10904     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10905     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10906   if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
10907       (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
10908       (VT.is512BitVector() && !Subtarget.hasBWI()))
10909     return SDValue();
10910 
10911   // We don't currently support lane crossing permutes.
10912   // We don't currently support lane-crossing permutes.
10913     return SDValue();
10914 
10915   int Scale = VT.getScalarSizeInBits() / 8;
10916   int NumLanes = VT.getSizeInBits() / 128;
10917   int NumElts = VT.getVectorNumElements();
10918   int NumEltsPerLane = NumElts / NumLanes;
10919 
10920   // Determine range of mask elts.
10921   bool Blend1 = true;
10922   bool Blend2 = true;
10923   std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
10924   std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
10925   for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10926     for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10927       int M = Mask[Lane + Elt];
10928       if (M < 0)
10929         continue;
10930       if (M < NumElts) {
10931         Blend1 &= (M == (Lane + Elt));
10932         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10933         M = M % NumEltsPerLane;
10934         Range1.first = std::min(Range1.first, M);
10935         Range1.second = std::max(Range1.second, M);
10936       } else {
10937         M -= NumElts;
10938         Blend2 &= (M == (Lane + Elt));
10939         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10940         M = M % NumEltsPerLane;
10941         Range2.first = std::min(Range2.first, M);
10942         Range2.second = std::max(Range2.second, M);
10943       }
10944     }
10945   }
10946 
10947   // Bail if we don't need both elements.
10948   // TODO - it might be worth doing this for unary shuffles if the permute
10949   // can be widened.
10950   if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
10951       !(0 <= Range2.first && Range2.second < NumEltsPerLane))
10952     return SDValue();
10953 
10954   if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
10955     return SDValue();
10956 
10957   // Rotate the 2 ops so we can access both ranges, then permute the result.
10958   auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
10959     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10960     SDValue Rotate = DAG.getBitcast(
10961         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
10962                         DAG.getBitcast(ByteVT, Lo),
10963                         DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
10964     SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
10965     for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10966       for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10967         int M = Mask[Lane + Elt];
10968         if (M < 0)
10969           continue;
10970         if (M < NumElts)
10971           PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
10972         else
10973           PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
10974       }
10975     }
10976     return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
10977   };
10978 
10979   // Check if the ranges are small enough to rotate from either direction.
10980   if (Range2.second < Range1.first)
10981     return RotateAndPermute(V1, V2, Range1.first, 0);
10982   if (Range1.second < Range2.first)
10983     return RotateAndPermute(V2, V1, Range2.first, NumElts);
10984   return SDValue();
10985 }
10986 
10987 static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
10988   return isUndefOrEqual(Mask, 0);
10989 }
10990 
10991 static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
10992   return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
10993 }
10994 
10995 /// Check if the Mask consists of the same element repeated multiple times.
10996 static bool isSingleElementRepeatedMask(ArrayRef<int> Mask) {
10997   size_t NumUndefs = 0;
10998   std::optional<int> UniqueElt;
10999   for (int Elt : Mask) {
11000     if (Elt == SM_SentinelUndef) {
11001       NumUndefs++;
11002       continue;
11003     }
11004     if (UniqueElt.has_value() && UniqueElt.value() != Elt)
11005       return false;
11006     UniqueElt = Elt;
11007   }
11008   // Make sure the element is repeated enough times by checking the number of
11009   // undefs is small.
11010   return NumUndefs <= Mask.size() / 2 && UniqueElt.has_value();
11011 }
11012 
11013 /// Generic routine to decompose a shuffle and blend into independent
11014 /// blends and permutes.
11015 ///
11016 /// This matches the extremely common pattern for handling combined
11017 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11018 /// operations. It will try to pick the best arrangement of shuffles and
11019 /// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
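      /// For example, a v4i32 shuffle with mask <6,1,7,3> can be decomposed into a
      /// no-op shuffle of V1, a permute of V2 by <2,u,3,u>, and a final blend of the
      /// two results with mask <4,1,6,3>.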
11020 static SDValue lowerShuffleAsDecomposedShuffleMerge(
11021     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11022     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11023   int NumElts = Mask.size();
11024   int NumLanes = VT.getSizeInBits() / 128;
11025   int NumEltsPerLane = NumElts / NumLanes;
11026 
11027   // Shuffle the input elements into the desired positions in V1 and V2 and
11028   // unpack/blend them together.
11029   bool IsAlternating = true;
11030   SmallVector<int, 32> V1Mask(NumElts, -1);
11031   SmallVector<int, 32> V2Mask(NumElts, -1);
11032   SmallVector<int, 32> FinalMask(NumElts, -1);
11033   for (int i = 0; i < NumElts; ++i) {
11034     int M = Mask[i];
11035     if (M >= 0 && M < NumElts) {
11036       V1Mask[i] = M;
11037       FinalMask[i] = i;
11038       IsAlternating &= (i & 1) == 0;
11039     } else if (M >= NumElts) {
11040       V2Mask[i] = M - NumElts;
11041       FinalMask[i] = i + NumElts;
11042       IsAlternating &= (i & 1) == 1;
11043     }
11044   }
11045 
11046   // If we effectively demand only the 0'th element of \p Input (though not
11047   // necessarily only at position 0), then broadcast that input and change
11048   // \p InputMask to be a no-op (identity) mask.
11049   auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
11050                                          &DAG](SDValue &Input,
11051                                                MutableArrayRef<int> InputMask) {
11052     unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
11053     if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
11054                                  !X86::mayFoldLoad(Input, Subtarget)))
11055       return;
11056     if (isNoopShuffleMask(InputMask))
11057       return;
11058     assert(isBroadcastShuffleMask(InputMask) &&
11059            "Expected to demand only the 0'th element.");
11060     Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
11061     for (auto I : enumerate(InputMask)) {
11062       int &InputMaskElt = I.value();
11063       if (InputMaskElt >= 0)
11064         InputMaskElt = I.index();
11065     }
11066   };
11067 
11068   // Currently, we may need to produce one shuffle per input, and blend results.
11069   // It is possible that the shuffle for one of the inputs is already a no-op.
11070   // See if we can simplify non-no-op shuffles into broadcasts,
11071   // which we consider to be strictly better than an arbitrary shuffle.
11072   if (isNoopOrBroadcastShuffleMask(V1Mask) &&
11073       isNoopOrBroadcastShuffleMask(V2Mask)) {
11074     canonicalizeBroadcastableInput(V1, V1Mask);
11075     canonicalizeBroadcastableInput(V2, V2Mask);
11076   }
11077 
11078   // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11079   // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11080   // the shuffle may be able to fold with a load or other benefit. However, when
11081   // we would have to do twice as many shuffles to achieve this, a 2-input
11082   // pre-shuffle first is a better strategy.
11083   if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11084     // Only prefer immediate blends to unpack/rotate.
11085     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11086                                                           DAG, true))
11087       return BlendPerm;
11088     // If either input vector provides only a single element which is repeated
11089     // multiple times, unpacking from both input vectors would generate worse
11090     // code. e.g. for
11091     // t5: v16i8 = vector_shuffle<16,0,16,1,16,2,16,3,16,4,16,5,16,6,16,7> t2, t4
11092     // it is better to process t4 first to create a vector of t4[0], then unpack
11093     // that vector with t2.
11094     if (!isSingleElementRepeatedMask(V1Mask) &&
11095         !isSingleElementRepeatedMask(V2Mask))
11096       if (SDValue UnpackPerm =
11097               lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
11098         return UnpackPerm;
11099     if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11100             DL, VT, V1, V2, Mask, Subtarget, DAG))
11101       return RotatePerm;
11102     // Unpack/rotate failed - try again with variable blends.
11103     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11104                                                           DAG))
11105       return BlendPerm;
11106     if (VT.getScalarSizeInBits() >= 32)
11107       if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
11108               DL, VT, V1, V2, Mask, Subtarget, DAG))
11109         return PermUnpack;
11110   }
11111 
11112   // If the final mask is an alternating blend of vXi8/vXi16, convert to an
11113   // UNPCKL(SHUFFLE, SHUFFLE) pattern.
11114   // TODO: It doesn't have to be alternating - but each lane mustn't have more
11115   // than half the elements coming from each source.
11116   if (IsAlternating && VT.getScalarSizeInBits() < 32) {
11117     V1Mask.assign(NumElts, -1);
11118     V2Mask.assign(NumElts, -1);
11119     FinalMask.assign(NumElts, -1);
11120     for (int i = 0; i != NumElts; i += NumEltsPerLane)
11121       for (int j = 0; j != NumEltsPerLane; ++j) {
11122         int M = Mask[i + j];
11123         if (M >= 0 && M < NumElts) {
11124           V1Mask[i + (j / 2)] = M;
11125           FinalMask[i + j] = i + (j / 2);
11126         } else if (M >= NumElts) {
11127           V2Mask[i + (j / 2)] = M - NumElts;
11128           FinalMask[i + j] = i + (j / 2) + NumElts;
11129         }
11130       }
11131   }
11132 
11133   V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11134   V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11135   return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
11136 }
11137 
11138 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
11139                                    const X86Subtarget &Subtarget,
11140                                    ArrayRef<int> Mask) {
11141   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11142   assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
11143 
11144   // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
11145   int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
11146   int MaxSubElts = 64 / EltSizeInBits;
11147   unsigned RotateAmt, NumSubElts;
11148   if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, MinSubElts,
11149                                           MaxSubElts, NumSubElts, RotateAmt))
11150     return -1;
11151   unsigned NumElts = Mask.size();
11152   MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
11153   RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
11154   return RotateAmt;
11155 }
11156 
11157 /// Lower shuffle using X86ISD::VROTLI rotations.
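      /// e.g. the repeating v16i8 mask <1,0,3,2,5,4,...> swaps the bytes within each
      /// 16-bit group, i.e. a rotation of every i16 element by 8 bits.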
11158 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
11159                                        ArrayRef<int> Mask,
11160                                        const X86Subtarget &Subtarget,
11161                                        SelectionDAG &DAG) {
11162   // Only XOP + AVX512 targets have bit rotation instructions.
11163   // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
11164   bool IsLegal =
11165       (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
11166   if (!IsLegal && Subtarget.hasSSE3())
11167     return SDValue();
11168 
11169   MVT RotateVT;
11170   int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
11171                                           Subtarget, Mask);
11172   if (RotateAmt < 0)
11173     return SDValue();
11174 
11175   // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
11176   // expanded to OR(SRL,SHL), will be more efficient, but if they can
11177   // widen to vXi16 or more then the existing lowering will likely be better.
11178   if (!IsLegal) {
11179     if ((RotateAmt % 16) == 0)
11180       return SDValue();
11181     // TODO: Use getTargetVShiftByConstNode.
11182     unsigned ShlAmt = RotateAmt;
11183     unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
11184     V1 = DAG.getBitcast(RotateVT, V1);
11185     SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
11186                               DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
11187     SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
11188                               DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
11189     SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
11190     return DAG.getBitcast(VT, Rot);
11191   }
11192 
11193   SDValue Rot =
11194       DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
11195                   DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
11196   return DAG.getBitcast(VT, Rot);
11197 }
11198 
11199 /// Try to match a vector shuffle as an element rotation.
11200 ///
11201 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11202 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
11203                                        ArrayRef<int> Mask) {
11204   int NumElts = Mask.size();
11205 
11206   // We need to detect various ways of spelling a rotation:
11207   //   [11, 12, 13, 14, 15,  0,  1,  2]
11208   //   [-1, 12, 13, 14, -1, -1,  1, -1]
11209   //   [-1, -1, -1, -1, -1, -1,  1,  2]
11210   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
11211   //   [-1,  4,  5,  6, -1, -1,  9, -1]
11212   //   [-1,  4,  5,  6, -1, -1, -1, -1]
11213   int Rotation = 0;
11214   SDValue Lo, Hi;
11215   for (int i = 0; i < NumElts; ++i) {
11216     int M = Mask[i];
11217     assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11218            "Unexpected mask index.");
11219     if (M < 0)
11220       continue;
11221 
11222     // Determine where a rotated vector would have started.
11223     int StartIdx = i - (M % NumElts);
11224     if (StartIdx == 0)
11225       // The identity rotation isn't interesting, stop.
11226       return -1;
11227 
11228     // If we found the tail of a vector the rotation must be the missing
11229     // front. If we found the head of a vector, it must be how much of the
11230     // head.
11231     int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11232 
11233     if (Rotation == 0)
11234       Rotation = CandidateRotation;
11235     else if (Rotation != CandidateRotation)
11236       // The rotations don't match, so we can't match this mask.
11237       return -1;
11238 
11239     // Compute which value this mask is pointing at.
11240     SDValue MaskV = M < NumElts ? V1 : V2;
11241 
11242     // Compute which of the two target values this index should be assigned
11243     // to. This reflects whether the high elements are remaining or the low
11244     // elements are remaining.
11245     SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11246 
11247     // Either set up this value if we've not encountered it before, or check
11248     // that it remains consistent.
11249     if (!TargetV)
11250       TargetV = MaskV;
11251     else if (TargetV != MaskV)
11252       // This may be a rotation, but it pulls from the inputs in some
11253       // unsupported interleaving.
11254       return -1;
11255   }
11256 
11257   // Check that we successfully analyzed the mask, and normalize the results.
11258   assert(Rotation != 0 && "Failed to locate a viable rotation!");
11259   assert((Lo || Hi) && "Failed to find a rotated input vector!");
11260   if (!Lo)
11261     Lo = Hi;
11262   else if (!Hi)
11263     Hi = Lo;
11264 
11265   V1 = Lo;
11266   V2 = Hi;
11267 
11268   return Rotation;
11269 }
11270 
11271 /// Try to lower a vector shuffle as a byte rotation.
11272 ///
11273 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11274 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11275 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11276 /// try to generically lower a vector shuffle through such a pattern. It
11277 /// does not check for the profitability of lowering either as PALIGNR or
11278 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11279 /// This matches shuffle vectors that look like:
11280 ///
11281 ///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11282 ///
11283 /// Essentially it concatenates V1 and V2, shifts right by some number of
11284 /// elements, and takes the low elements as the result. Note that while this is
11285 /// specified as a *right shift* because x86 is little-endian, it is a *left
11286 /// rotate* of the vector lanes.
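      /// For the v8i16 example above the element rotation is 3, which scales to a
      /// byte rotation of 6 (2 bytes per element).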
11287 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11288                                     ArrayRef<int> Mask) {
11289   // Don't accept any shuffles with zero elements.
11290   if (isAnyZero(Mask))
11291     return -1;
11292 
11293   // PALIGNR works on 128-bit lanes.
11294   SmallVector<int, 16> RepeatedMask;
11295   if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11296     return -1;
11297 
11298   int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
11299   if (Rotation <= 0)
11300     return -1;
11301 
11302   // PALIGNR rotates bytes, so we need to scale the
11303   // rotation based on how many bytes are in the vector lane.
11304   int NumElts = RepeatedMask.size();
11305   int Scale = 16 / NumElts;
11306   return Rotation * Scale;
11307 }
11308 
11309 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11310                                         SDValue V2, ArrayRef<int> Mask,
11311                                         const X86Subtarget &Subtarget,
11312                                         SelectionDAG &DAG) {
11313   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11314 
11315   SDValue Lo = V1, Hi = V2;
11316   int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11317   if (ByteRotation <= 0)
11318     return SDValue();
11319 
11320   // Cast the inputs to i8 vector of correct length to match PALIGNR or
11321   // PSLLDQ/PSRLDQ.
11322   MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11323   Lo = DAG.getBitcast(ByteVT, Lo);
11324   Hi = DAG.getBitcast(ByteVT, Hi);
11325 
11326   // SSSE3 targets can use the palignr instruction.
11327   if (Subtarget.hasSSSE3()) {
11328     assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11329            "512-bit PALIGNR requires BWI instructions");
11330     return DAG.getBitcast(
11331         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11332                         DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11333   }
11334 
11335   assert(VT.is128BitVector() &&
11336          "Rotate-based lowering only supports 128-bit lowering!");
11337   assert(Mask.size() <= 16 &&
11338          "Can shuffle at most 16 bytes in a 128-bit vector!");
11339   assert(ByteVT == MVT::v16i8 &&
11340          "SSE2 rotate lowering only needed for v16i8!");
11341 
11342   // Default SSE2 implementation
11343   int LoByteShift = 16 - ByteRotation;
11344   int HiByteShift = ByteRotation;
11345 
11346   SDValue LoShift =
11347       DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11348                   DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11349   SDValue HiShift =
11350       DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11351                   DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11352   return DAG.getBitcast(VT,
11353                         DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11354 }
11355 
11356 /// Try to lower a vector shuffle as a dword/qword rotation.
11357 ///
11358 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11359 /// rotation of the concatenation of two vectors; this routine will
11360 /// try to generically lower a vector shuffle through such a pattern.
11361 ///
11362 /// Essentially it concatenates V1 and V2, shifts right by some number of
11363 /// elements, and takes the low elements as the result. Note that while this is
11364 /// specified as a *right shift* because x86 is little-endian, it is a *left
11365 /// rotate* of the vector lanes.
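      /// For example, the v8i32 mask <3,4,5,6,7,8,9,10> matches a rotation of 3
      /// elements and lowers to a single VALIGND.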
11366 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
11367                                     SDValue V2, ArrayRef<int> Mask,
11368                                     const APInt &Zeroable,
11369                                     const X86Subtarget &Subtarget,
11370                                     SelectionDAG &DAG) {
11371   assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11372          "Only 32-bit and 64-bit elements are supported!");
11373 
11374   // 128/256-bit vectors are only supported with VLX.
11375   assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11376          && "VLX required for 128/256-bit vectors");
11377 
11378   SDValue Lo = V1, Hi = V2;
11379   int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
11380   if (0 < Rotation)
11381     return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11382                        DAG.getTargetConstant(Rotation, DL, MVT::i8));
11383 
11384   // See if we can use VALIGN as a cross-lane version of VSHLDQ/VSRLDQ.
11385   // TODO: Pull this out as a matchShuffleAsElementShift helper?
11386   // TODO: We can probably make this more aggressive and use shift-pairs like
11387   // lowerShuffleAsByteShiftMask.
11388   unsigned NumElts = Mask.size();
11389   unsigned ZeroLo = Zeroable.countr_one();
11390   unsigned ZeroHi = Zeroable.countl_one();
11391   assert((ZeroLo + ZeroHi) < NumElts && "Zeroable shuffle detected");
11392   if (!ZeroLo && !ZeroHi)
11393     return SDValue();
11394 
11395   if (ZeroLo) {
11396     SDValue Src = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11397     int Low = Mask[ZeroLo] < (int)NumElts ? 0 : NumElts;
11398     if (isSequentialOrUndefInRange(Mask, ZeroLo, NumElts - ZeroLo, Low))
11399       return DAG.getNode(X86ISD::VALIGN, DL, VT, Src,
11400                          getZeroVector(VT, Subtarget, DAG, DL),
11401                          DAG.getTargetConstant(NumElts - ZeroLo, DL, MVT::i8));
11402   }
11403 
11404   if (ZeroHi) {
11405     SDValue Src = Mask[0] < (int)NumElts ? V1 : V2;
11406     int Low = Mask[0] < (int)NumElts ? 0 : NumElts;
11407     if (isSequentialOrUndefInRange(Mask, 0, NumElts - ZeroHi, Low + ZeroHi))
11408       return DAG.getNode(X86ISD::VALIGN, DL, VT,
11409                          getZeroVector(VT, Subtarget, DAG, DL), Src,
11410                          DAG.getTargetConstant(ZeroHi, DL, MVT::i8));
11411   }
11412 
11413   return SDValue();
11414 }
11415 
11416 /// Try to lower a vector shuffle as a byte shift sequence.
11417 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11418                                            SDValue V2, ArrayRef<int> Mask,
11419                                            const APInt &Zeroable,
11420                                            const X86Subtarget &Subtarget,
11421                                            SelectionDAG &DAG) {
11422   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11423   assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11424 
11425   // We need a shuffle that has zeros at one/both ends and a sequential
11426   // shuffle from one source within.
11427   unsigned ZeroLo = Zeroable.countr_one();
11428   unsigned ZeroHi = Zeroable.countl_one();
11429   if (!ZeroLo && !ZeroHi)
11430     return SDValue();
11431 
11432   unsigned NumElts = Mask.size();
11433   unsigned Len = NumElts - (ZeroLo + ZeroHi);
11434   if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11435     return SDValue();
11436 
11437   unsigned Scale = VT.getScalarSizeInBits() / 8;
11438   ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11439   if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11440       !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11441     return SDValue();
11442 
11443   SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11444   Res = DAG.getBitcast(MVT::v16i8, Res);
11445 
11446   // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11447   // inner sequential set of elements, possibly offset:
11448   // 01234567 --> zzzzzz01 --> 1zzzzzzz
11449   // 01234567 --> 4567zzzz --> zzzzz456
11450   // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11451   if (ZeroLo == 0) {
11452     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11453     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11454                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11455     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11456                       DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11457   } else if (ZeroHi == 0) {
11458     unsigned Shift = Mask[ZeroLo] % NumElts;
11459     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11460                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11461     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11462                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11463   } else if (!Subtarget.hasSSSE3()) {
11464     // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11465     // by performing 3 byte shifts. Shuffle combining can kick in above that.
11466     // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11467     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11468     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11469                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11470     Shift += Mask[ZeroLo] % NumElts;
11471     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11472                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11473     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11474                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11475   } else
11476     return SDValue();
11477 
11478   return DAG.getBitcast(VT, Res);
11479 }
11480 
11481 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11482 ///
11483 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11484 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11485 /// matches elements from one of the input vectors shuffled to the left or
11486 /// right with zeroable elements 'shifted in'. It handles both the strictly
11487 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11488 /// quad word lane.
11489 ///
11490 /// PSHL : (little-endian) left bit shift.
11491 /// [ zz, 0, zz,  2 ]
11492 /// [ -1, 4, zz, -1 ]
11493 /// PSRL : (little-endian) right bit shift.
11494 /// [  1, zz,  3, zz]
11495 /// [ -1, -1,  7, zz]
11496 /// PSLLDQ : (little-endian) left byte shift
11497 /// [ zz,  0,  1,  2,  3,  4,  5,  6]
11498 /// [ zz, zz, -1, -1,  2,  3,  4, -1]
11499 /// [ zz, zz, zz, zz, zz, zz, -1,  1]
11500 /// PSRLDQ : (little-endian) right byte shift
11501 /// [  5, 6,  7, zz, zz, zz, zz, zz]
11502 /// [ -1, 5,  6,  7, zz, zz, zz, zz]
11503 /// [  1, 2, -1, -1, -1, -1, zz, zz]
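      /// For example, with 32-bit elements the PSRL mask [ 1, zz, 3, zz] matches a
      /// VSRLI by 32 bits of the vector reinterpreted as v2i64.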
11504 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11505                                unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11506                                int MaskOffset, const APInt &Zeroable,
11507                                const X86Subtarget &Subtarget) {
11508   int Size = Mask.size();
11509   unsigned SizeInBits = Size * ScalarSizeInBits;
11510 
11511   auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11512     for (int i = 0; i < Size; i += Scale)
11513       for (int j = 0; j < Shift; ++j)
11514         if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11515           return false;
11516 
11517     return true;
11518   };
11519 
11520   auto MatchShift = [&](int Shift, int Scale, bool Left) {
11521     for (int i = 0; i != Size; i += Scale) {
11522       unsigned Pos = Left ? i + Shift : i;
11523       unsigned Low = Left ? i : i + Shift;
11524       unsigned Len = Scale - Shift;
11525       if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11526         return -1;
11527     }
11528 
11529     int ShiftEltBits = ScalarSizeInBits * Scale;
11530     bool ByteShift = ShiftEltBits > 64;
11531     Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11532                   : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11533     int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11534 
11535     // Normalize the scale for byte shifts to still produce an i64 element
11536     // type.
11537     Scale = ByteShift ? Scale / 2 : Scale;
11538 
11539     // We need to round trip through the appropriate type for the shift.
11540     MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11541     ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11542                         : MVT::getVectorVT(ShiftSVT, Size / Scale);
11543     return (int)ShiftAmt;
11544   };
11545 
11546   // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11547   // keep doubling the size of the integer elements up to that. We can
11548   // then shift the elements of the integer vector by whole multiples of
11549   // their width within the elements of the larger integer vector. Test each
11550   // multiple to see if we can find a match with the moved element indices
11551   // and that the shifted in elements are all zeroable.
11552   unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11553   for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11554     for (int Shift = 1; Shift != Scale; ++Shift)
11555       for (bool Left : {true, false})
11556         if (CheckZeros(Shift, Scale, Left)) {
11557           int ShiftAmt = MatchShift(Shift, Scale, Left);
11558           if (0 < ShiftAmt)
11559             return ShiftAmt;
11560         }
11561 
11562   // no match
11563   return -1;
11564 }
11565 
11566 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11567                                    SDValue V2, ArrayRef<int> Mask,
11568                                    const APInt &Zeroable,
11569                                    const X86Subtarget &Subtarget,
11570                                    SelectionDAG &DAG, bool BitwiseOnly) {
11571   int Size = Mask.size();
11572   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11573 
11574   MVT ShiftVT;
11575   SDValue V = V1;
11576   unsigned Opcode;
11577 
11578   // Try to match shuffle against V1 shift.
11579   int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11580                                      Mask, 0, Zeroable, Subtarget);
11581 
11582   // If V1 failed, try to match shuffle against V2 shift.
11583   if (ShiftAmt < 0) {
11584     ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11585                                    Mask, Size, Zeroable, Subtarget);
11586     V = V2;
11587   }
11588 
11589   if (ShiftAmt < 0)
11590     return SDValue();
11591 
11592   if (BitwiseOnly && (Opcode == X86ISD::VSHLDQ || Opcode == X86ISD::VSRLDQ))
11593     return SDValue();
11594 
11595   assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11596          "Illegal integer vector type");
11597   V = DAG.getBitcast(ShiftVT, V);
11598   V = DAG.getNode(Opcode, DL, ShiftVT, V,
11599                   DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11600   return DAG.getBitcast(VT, V);
11601 }
11602 
11603 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11604 // Remainder of lower half result is zero and upper half is all undef.
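      // e.g. the v8i16 mask <1,2,zz,zz,u,u,u,u> extracts elements 1-2 of the source,
      // giving BitLen = 32 and BitIdx = 16.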
11605 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11606                                 ArrayRef<int> Mask, uint64_t &BitLen,
11607                                 uint64_t &BitIdx, const APInt &Zeroable) {
11608   int Size = Mask.size();
11609   int HalfSize = Size / 2;
11610   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11611   assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
11612 
11613   // Upper half must be undefined.
11614   if (!isUndefUpperHalf(Mask))
11615     return false;
11616 
11617   // Determine the extraction length from the part of the
11618   // lower half that isn't zeroable.
11619   int Len = HalfSize;
11620   for (; Len > 0; --Len)
11621     if (!Zeroable[Len - 1])
11622       break;
11623   assert(Len > 0 && "Zeroable shuffle mask");
11624 
11625   // Attempt to match first Len sequential elements from the lower half.
11626   SDValue Src;
11627   int Idx = -1;
11628   for (int i = 0; i != Len; ++i) {
11629     int M = Mask[i];
11630     if (M == SM_SentinelUndef)
11631       continue;
11632     SDValue &V = (M < Size ? V1 : V2);
11633     M = M % Size;
11634 
11635     // The extracted elements must start at a valid index and all mask
11636     // elements must be in the lower half.
11637     if (i > M || M >= HalfSize)
11638       return false;
11639 
11640     if (Idx < 0 || (Src == V && Idx == (M - i))) {
11641       Src = V;
11642       Idx = M - i;
11643       continue;
11644     }
11645     return false;
11646   }
11647 
11648   if (!Src || Idx < 0)
11649     return false;
11650 
11651   assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11652   BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11653   BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11654   V1 = Src;
11655   return true;
11656 }
11657 
11658 // INSERTQ: Extract lowest Len elements from lower half of second source and
11659 // insert over first source, starting at Idx.
11660 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
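      // e.g. the v8i16 mask <0,8,9,3,u,u,u,u> inserts elements 0-1 of V2 into V1 at
      // element 1, giving BitLen = 32 and BitIdx = 16.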
11661 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11662                                   ArrayRef<int> Mask, uint64_t &BitLen,
11663                                   uint64_t &BitIdx) {
11664   int Size = Mask.size();
11665   int HalfSize = Size / 2;
11666   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11667 
11668   // Upper half must be undefined.
11669   if (!isUndefUpperHalf(Mask))
11670     return false;
11671 
11672   for (int Idx = 0; Idx != HalfSize; ++Idx) {
11673     SDValue Base;
11674 
11675     // Attempt to match first source from mask before insertion point.
11676     if (isUndefInRange(Mask, 0, Idx)) {
11677       /* EMPTY */
11678     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11679       Base = V1;
11680     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11681       Base = V2;
11682     } else {
11683       continue;
11684     }
11685 
11686     // Extend the extraction length looking to match both the insertion of
11687     // the second source and the remaining elements of the first.
11688     for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11689       SDValue Insert;
11690       int Len = Hi - Idx;
11691 
11692       // Match insertion.
11693       if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11694         Insert = V1;
11695       } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11696         Insert = V2;
11697       } else {
11698         continue;
11699       }
11700 
11701       // Match the remaining elements of the lower half.
11702       if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11703         /* EMPTY */
11704       } else if ((!Base || (Base == V1)) &&
11705                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11706         Base = V1;
11707       } else if ((!Base || (Base == V2)) &&
11708                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11709                                             Size + Hi)) {
11710         Base = V2;
11711       } else {
11712         continue;
11713       }
11714 
11715       BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11716       BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11717       V1 = Base;
11718       V2 = Insert;
11719       return true;
11720     }
11721   }
11722 
11723   return false;
11724 }
11725 
11726 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11727 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11728                                      SDValue V2, ArrayRef<int> Mask,
11729                                      const APInt &Zeroable, SelectionDAG &DAG) {
11730   uint64_t BitLen, BitIdx;
11731   if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11732     return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11733                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
11734                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11735 
11736   if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11737     return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11738                        V2 ? V2 : DAG.getUNDEF(VT),
11739                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
11740                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11741 
11742   return SDValue();
11743 }
11744 
11745 /// Lower a vector shuffle as a zero or any extension.
11746 ///
11747 /// Given a specific number of elements, element bit width, and extension
11748 /// stride, produce either a zero or any extension based on the available
11749 /// features of the subtarget. The extended elements are consecutive and
11750 /// can start from an offset element index in the input; to avoid excess
11751 /// shuffling, the offset must either be in the bottom lane or at the start
11752 /// of a higher lane. All extended elements must be from
11753 /// the same lane.
11754 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11755     const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11756     ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11757   assert(Scale > 1 && "Need a scale to extend.");
11758   int EltBits = VT.getScalarSizeInBits();
11759   int NumElements = VT.getVectorNumElements();
11760   int NumEltsPerLane = 128 / EltBits;
11761   int OffsetLane = Offset / NumEltsPerLane;
11762   assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11763          "Only 8, 16, and 32 bit elements can be extended.");
11764   assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11765   assert(0 <= Offset && "Extension offset must be positive.");
11766   assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11767          "Extension offset must be in the first lane or start an upper lane.");
11768 
11769   // Check that an index is in same lane as the base offset.
11770   auto SafeOffset = [&](int Idx) {
11771     return OffsetLane == (Idx / NumEltsPerLane);
11772   };
11773 
11774   // Shift along an input so that the offset base moves to the first element.
11775   auto ShuffleOffset = [&](SDValue V) {
11776     if (!Offset)
11777       return V;
11778 
11779     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11780     for (int i = 0; i * Scale < NumElements; ++i) {
11781       int SrcIdx = i + Offset;
11782       ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11783     }
11784     return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11785   };
11786 
11787   // Found a valid a/zext mask! Try various lowering strategies based on the
11788   // input type and available ISA extensions.
11789   if (Subtarget.hasSSE41()) {
11790     // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
11791     // PUNPCK will catch this in a later shuffle match.
11792     if (Offset && Scale == 2 && VT.is128BitVector())
11793       return SDValue();
11794     MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11795                                  NumElements / Scale);
11796     InputV = DAG.getBitcast(VT, InputV);
11797     InputV = ShuffleOffset(InputV);
11798     InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
11799                                     DL, ExtVT, InputV, DAG);
11800     return DAG.getBitcast(VT, InputV);
11801   }
11802 
11803   assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11804   InputV = DAG.getBitcast(VT, InputV);
11805 
11806   // For any extends we can cheat for larger element sizes and use shuffle
11807   // instructions that can fold with a load and/or copy.
11808   if (AnyExt && EltBits == 32) {
11809     int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11810                          -1};
11811     return DAG.getBitcast(
11812         VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11813                         DAG.getBitcast(MVT::v4i32, InputV),
11814                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11815   }
11816   if (AnyExt && EltBits == 16 && Scale > 2) {
11817     int PSHUFDMask[4] = {Offset / 2, -1,
11818                          SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11819     InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11820                          DAG.getBitcast(MVT::v4i32, InputV),
11821                          getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11822     int PSHUFWMask[4] = {1, -1, -1, -1};
11823     unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11824     return DAG.getBitcast(
11825         VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11826                         DAG.getBitcast(MVT::v8i16, InputV),
11827                         getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
11828   }
11829 
11830   // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
11831   // to 64-bits.
11832   if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
11833     assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
11834     assert(VT.is128BitVector() && "Unexpected vector width!");
11835 
11836     int LoIdx = Offset * EltBits;
11837     SDValue Lo = DAG.getBitcast(
11838         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11839                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11840                                 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
11841 
11842     if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
11843       return DAG.getBitcast(VT, Lo);
11844 
11845     int HiIdx = (Offset + 1) * EltBits;
11846     SDValue Hi = DAG.getBitcast(
11847         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11848                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11849                                 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
11850     return DAG.getBitcast(VT,
11851                           DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
11852   }
11853 
11854   // If this would require more than 2 unpack instructions to expand, use
11855   // pshufb when available. We can only use more than 2 unpack instructions
11856   // when zero extending i8 elements which also makes it easier to use pshufb.
11857   if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
11858     assert(NumElements == 16 && "Unexpected byte vector width!");
11859     SDValue PSHUFBMask[16];
11860     for (int i = 0; i < 16; ++i) {
11861       int Idx = Offset + (i / Scale);
11862       if ((i % Scale == 0 && SafeOffset(Idx))) {
11863         PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
11864         continue;
11865       }
11866       PSHUFBMask[i] =
11867           AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
11868     }
11869     InputV = DAG.getBitcast(MVT::v16i8, InputV);
11870     return DAG.getBitcast(
11871         VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
11872                         DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
11873   }
11874 
11875   // If we are extending from an offset, ensure we start on a boundary that
11876   // we can unpack from.
11877   int AlignToUnpack = Offset % (NumElements / Scale);
11878   if (AlignToUnpack) {
11879     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11880     for (int i = AlignToUnpack; i < NumElements; ++i)
11881       ShMask[i - AlignToUnpack] = i;
11882     InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
11883     Offset -= AlignToUnpack;
11884   }
11885 
11886   // Otherwise emit a sequence of unpacks.
11887   do {
11888     unsigned UnpackLoHi = X86ISD::UNPCKL;
11889     if (Offset >= (NumElements / 2)) {
11890       UnpackLoHi = X86ISD::UNPCKH;
11891       Offset -= (NumElements / 2);
11892     }
11893 
11894     MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
11895     SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
11896                          : getZeroVector(InputVT, Subtarget, DAG, DL);
11897     InputV = DAG.getBitcast(InputVT, InputV);
11898     InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
11899     Scale /= 2;
11900     EltBits *= 2;
11901     NumElements /= 2;
11902   } while (Scale > 1);
11903   return DAG.getBitcast(VT, InputV);
11904 }
11905 
11906 /// Try to lower a vector shuffle as a zero extension on any microarch.
11907 ///
11908 /// This routine will try to do everything in its power to cleverly lower
11909 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
11910 /// check for the profitability of this lowering; it tries to aggressively
11911 /// match this pattern. It will use all of the micro-architectural details it
11912 /// can to emit an efficient lowering. It handles both blends with all-zero
11913 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
11914 /// masking out later).
11915 ///
11916 /// The reason we have dedicated lowering for zext-style shuffles is that they
11917 /// are both incredibly common and often quite performance sensitive.
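      /// For example, the v4i32 mask <0,zz,1,zz> is recognized as a zero extension of
      /// the low two i32 elements of V1 to i64.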
11918 static SDValue lowerShuffleAsZeroOrAnyExtend(
11919     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11920     const APInt &Zeroable, const X86Subtarget &Subtarget,
11921     SelectionDAG &DAG) {
11922   int Bits = VT.getSizeInBits();
11923   int NumLanes = Bits / 128;
11924   int NumElements = VT.getVectorNumElements();
11925   int NumEltsPerLane = NumElements / NumLanes;
11926   assert(VT.getScalarSizeInBits() <= 32 &&
11927          "Exceeds 32-bit integer zero extension limit");
11928   assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
11929 
11930   // Define a helper function to check a particular ext-scale and lower to it if
11931   // valid.
11932   auto Lower = [&](int Scale) -> SDValue {
11933     SDValue InputV;
11934     bool AnyExt = true;
11935     int Offset = 0;
11936     int Matches = 0;
11937     for (int i = 0; i < NumElements; ++i) {
11938       int M = Mask[i];
11939       if (M < 0)
11940         continue; // Valid anywhere but doesn't tell us anything.
11941       if (i % Scale != 0) {
11942         // Each of the extended elements needs to be zeroable.
11943         if (!Zeroable[i])
11944           return SDValue();
11945 
11946         // We no longer are in the anyext case.
11947         AnyExt = false;
11948         continue;
11949       }
11950 
11951       // Each of the base elements needs to be consecutive indices into the
11952       // same input vector.
11953       SDValue V = M < NumElements ? V1 : V2;
11954       M = M % NumElements;
11955       if (!InputV) {
11956         InputV = V;
11957         Offset = M - (i / Scale);
11958       } else if (InputV != V)
11959         return SDValue(); // Flip-flopping inputs.
11960 
11961       // Offset must start in the lowest 128-bit lane or at the start of an
11962       // upper lane.
11963       // FIXME: Is it ever worth allowing a negative base offset?
11964       if (!((0 <= Offset && Offset < NumEltsPerLane) ||
11965             (Offset % NumEltsPerLane) == 0))
11966         return SDValue();
11967 
11968       // If we are offsetting, all referenced entries must come from the same
11969       // lane.
11970       if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
11971         return SDValue();
11972 
11973       if ((M % NumElements) != (Offset + (i / Scale)))
11974         return SDValue(); // Non-consecutive strided elements.
11975       Matches++;
11976     }
11977 
11978     // If we fail to find an input, we have a zero-shuffle which should always
11979     // have already been handled.
11980     // FIXME: Maybe handle this here in case during blending we end up with one?
11981     if (!InputV)
11982       return SDValue();
11983 
11984     // If we are offsetting, don't extend if we only match a single input, we
11985     // can always do better by using a basic PSHUF or PUNPCK.
11986     if (Offset != 0 && Matches < 2)
11987       return SDValue();
11988 
11989     return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
11990                                                  InputV, Mask, Subtarget, DAG);
11991   };
11992 
11993   // The widest scale possible for extending is to a 64-bit integer.
11994   assert(Bits % 64 == 0 &&
11995          "The number of bits in a vector must be divisible by 64 on x86!");
11996   int NumExtElements = Bits / 64;
11997 
11998   // Each iteration, try extending the elements half as much, but into twice as
11999   // many elements.
12000   for (; NumExtElements < NumElements; NumExtElements *= 2) {
12001     assert(NumElements % NumExtElements == 0 &&
12002            "The input vector size must be divisible by the extended size.");
12003     if (SDValue V = Lower(NumElements / NumExtElements))
12004       return V;
12005   }
12006 
12007   // General extends failed, but 128-bit vectors may be able to use MOVQ.
12008   if (Bits != 128)
12009     return SDValue();
12010 
12011   // Returns one of the source operands if the shuffle can be reduced to a
12012   // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12013   auto CanZExtLowHalf = [&]() {
12014     for (int i = NumElements / 2; i != NumElements; ++i)
12015       if (!Zeroable[i])
12016         return SDValue();
12017     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12018       return V1;
12019     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12020       return V2;
12021     return SDValue();
12022   };
12023 
12024   if (SDValue V = CanZExtLowHalf()) {
12025     V = DAG.getBitcast(MVT::v2i64, V);
12026     V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12027     return DAG.getBitcast(VT, V);
12028   }
12029 
12030   // No viable ext lowering found.
12031   return SDValue();
12032 }
12033 
12034 /// Try to get a scalar value for a specific element of a vector.
12035 ///
12036 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12037 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12038                                               SelectionDAG &DAG) {
12039   MVT VT = V.getSimpleValueType();
12040   MVT EltVT = VT.getVectorElementType();
12041   V = peekThroughBitcasts(V);
12042 
12043   // If the bitcasts shift the element size, we can't extract an equivalent
12044   // element from it.
12045   MVT NewVT = V.getSimpleValueType();
12046   if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12047     return SDValue();
12048 
12049   if (V.getOpcode() == ISD::BUILD_VECTOR ||
12050       (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12051     // Ensure the scalar operand is the same size as the destination.
12052     // FIXME: Add support for scalar truncation where possible.
12053     SDValue S = V.getOperand(Idx);
12054     if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12055       return DAG.getBitcast(EltVT, S);
12056   }
12057 
12058   return SDValue();
12059 }
12060 
12061 /// Helper to test for a load that can be folded with x86 shuffles.
12062 ///
12063 /// This is particularly important because the set of instructions varies
12064 /// significantly based on whether the operand is a load or not.
12065 static bool isShuffleFoldableLoad(SDValue V) {
12066   return V->hasOneUse() &&
12067          ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
12068 }
12069 
12070 template<typename T>
12071 static bool isSoftF16(T VT, const X86Subtarget &Subtarget) {
12072   T EltVT = VT.getScalarType();
12073   return EltVT == MVT::bf16 || (EltVT == MVT::f16 && !Subtarget.hasFP16());
12074 }
12075 
12076 /// Try to lower insertion of a single element into a zero vector.
12077 ///
12078 /// This is a common pattern that we have especially efficient patterns to lower
12079 /// across all subtarget feature sets.
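      /// For example, a v4i32 shuffle that places V2[0] in lane 0 with every other
      /// lane zeroable lowers to a single X86ISD::VZEXT_MOVL of V2.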
12080 static SDValue lowerShuffleAsElementInsertion(
12081     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12082     const APInt &Zeroable, const X86Subtarget &Subtarget,
12083     SelectionDAG &DAG) {
12084   MVT ExtVT = VT;
12085   MVT EltVT = VT.getVectorElementType();
12086   unsigned NumElts = VT.getVectorNumElements();
12087   unsigned EltBits = VT.getScalarSizeInBits();
12088 
12089   if (isSoftF16(EltVT, Subtarget))
12090     return SDValue();
12091 
12092   int V2Index =
12093       find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12094       Mask.begin();
12095   bool IsV1Constant = getTargetConstantFromNode(V1) != nullptr;
12096   bool IsV1Zeroable = true;
12097   for (int i = 0, Size = Mask.size(); i < Size; ++i)
12098     if (i != V2Index && !Zeroable[i]) {
12099       IsV1Zeroable = false;
12100       break;
12101     }
12102 
12103   // Bail if a non-zero V1 isn't used in place.
12104   if (!IsV1Zeroable) {
12105     SmallVector<int, 8> V1Mask(Mask);
12106     V1Mask[V2Index] = -1;
12107     if (!isNoopShuffleMask(V1Mask))
12108       return SDValue();
12109   }
12110 
12111   // Check for a single input from a SCALAR_TO_VECTOR node.
12112   // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12113   // all the smarts here sunk into that routine. However, the current
12114   // lowering of BUILD_VECTOR makes that nearly impossible until the old
12115   // vector shuffle lowering is dead.
12116   SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12117                                                DAG);
12118   if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12119     // We need to zext the scalar if it is smaller than an i32.
12120     V2S = DAG.getBitcast(EltVT, V2S);
12121     if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
12122       // Using zext to expand a narrow element won't work for non-zero
12123       // insertions. But we can use a masked constant vector if we're
12124       // inserting V2 into the bottom of V1.
12125       if (!IsV1Zeroable && !(IsV1Constant && V2Index == 0))
12126         return SDValue();
12127 
12128       // Zero-extend directly to i32.
12129       ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12130       V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12131 
12132       // If we're inserting into a constant, mask off the inserted index
12133       // and OR with the zero-extended scalar.
12134       if (!IsV1Zeroable) {
12135         SmallVector<APInt> Bits(NumElts, APInt::getAllOnes(EltBits));
12136         Bits[V2Index] = APInt::getZero(EltBits);
12137         SDValue BitMask = getConstVector(Bits, VT, DAG, DL);
12138         V1 = DAG.getNode(ISD::AND, DL, VT, V1, BitMask);
12139         V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12140         V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2));
12141         return DAG.getNode(ISD::OR, DL, VT, V1, V2);
12142       }
12143     }
12144     V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12145   } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12146              EltVT == MVT::i16) {
12147     // Either not inserting from the low element of the input or the input
12148     // element size is too small to use VZEXT_MOVL to clear the high bits.
12149     return SDValue();
12150   }
12151 
12152   if (!IsV1Zeroable) {
12153     // If V1 can't be treated as a zero vector we have fewer options to lower
12154     // this. We can't support integer vectors or non-zero targets cheaply.
12155     assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12156     if (!VT.isFloatingPoint() || V2Index != 0)
12157       return SDValue();
12158     if (!VT.is128BitVector())
12159       return SDValue();
12160 
12161     // Otherwise, use MOVSD, MOVSS or MOVSH.
12162     unsigned MovOpc = 0;
12163     if (EltVT == MVT::f16)
12164       MovOpc = X86ISD::MOVSH;
12165     else if (EltVT == MVT::f32)
12166       MovOpc = X86ISD::MOVSS;
12167     else if (EltVT == MVT::f64)
12168       MovOpc = X86ISD::MOVSD;
12169     else
12170       llvm_unreachable("Unsupported floating point element type to handle!");
12171     return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
12172   }
12173 
12174   // This lowering only works for the low element with floating point vectors.
12175   if (VT.isFloatingPoint() && V2Index != 0)
12176     return SDValue();
12177 
12178   V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12179   if (ExtVT != VT)
12180     V2 = DAG.getBitcast(VT, V2);
12181 
12182   if (V2Index != 0) {
12183     // If we have 4 or fewer lanes we can cheaply shuffle the element into
12184     // the desired position. Otherwise it is more efficient to do a vector
12185     // shift left. We know that we can do a vector shift left because all
12186     // the inputs are zero.
12187     if (VT.isFloatingPoint() || NumElts <= 4) {
12188       SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12189       V2Shuffle[V2Index] = 0;
12190       V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12191     } else {
12192       V2 = DAG.getBitcast(MVT::v16i8, V2);
12193       V2 = DAG.getNode(
12194           X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12195           DAG.getTargetConstant(V2Index * EltBits / 8, DL, MVT::i8));
12196       V2 = DAG.getBitcast(VT, V2);
12197     }
12198   }
12199   return V2;
12200 }
12201 
12202 /// Try to lower broadcast of a single - truncated - integer element,
12203 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12204 ///
12205 /// This assumes we have AVX2.
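      /// For example, broadcasting i16 element 1 of a v4i32 build_vector shifts its
      /// i32 operand 0 right by 16 bits, truncates to i16 and broadcasts the result.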
12206 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12207                                             int BroadcastIdx,
12208                                             const X86Subtarget &Subtarget,
12209                                             SelectionDAG &DAG) {
12210   assert(Subtarget.hasAVX2() &&
12211          "We can only lower integer broadcasts with AVX2!");
12212 
12213   MVT EltVT = VT.getVectorElementType();
12214   MVT V0VT = V0.getSimpleValueType();
12215 
12216   assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12217   assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12218 
12219   MVT V0EltVT = V0VT.getVectorElementType();
12220   if (!V0EltVT.isInteger())
12221     return SDValue();
12222 
12223   const unsigned EltSize = EltVT.getSizeInBits();
12224   const unsigned V0EltSize = V0EltVT.getSizeInBits();
12225 
12226   // This is only a truncation if the original element type is larger.
12227   if (V0EltSize <= EltSize)
12228     return SDValue();
12229 
12230   assert(((V0EltSize % EltSize) == 0) &&
12231          "Scalar type sizes must all be powers of 2 on x86!");
12232 
12233   const unsigned V0Opc = V0.getOpcode();
12234   const unsigned Scale = V0EltSize / EltSize;
12235   const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12236 
12237   if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12238       V0Opc != ISD::BUILD_VECTOR)
12239     return SDValue();
12240 
12241   SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12242 
12243   // If we're extracting non-least-significant bits, shift so we can truncate.
12244   // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12245   // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12246   // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12247   if (const int OffsetIdx = BroadcastIdx % Scale)
12248     Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12249                          DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12250 
12251   return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12252                      DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12253 }
12254 
12255 /// Test whether this can be lowered with a single SHUFPS instruction.
12256 ///
12257 /// This is used to disable more specialized lowerings when the shufps lowering
12258 /// will happen to be efficient.
12259 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12260   // This routine only handles 128-bit shufps.
12261   assert(Mask.size() == 4 && "Unsupported mask size!");
12262   assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12263   assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12264   assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12265   assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12266 
12267   // To lower with a single SHUFPS we need to have the low half and high half
12268   // each requiring a single input.
12269   if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12270     return false;
12271   if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12272     return false;
12273 
12274   return true;
12275 }
12276 
12277 /// Test whether the specified input (0 or 1) is in-place blended by the
12278 /// given mask.
12279 ///
12280 /// This returns true if the elements from a particular input are already in
12281 /// the slots required by the given mask and require no permutation.
12282 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
12283   assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
12284   int Size = Mask.size();
12285   for (int i = 0; i < Size; ++i)
12286     if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
12287       return false;
12288 
12289   return true;
12290 }
12291 
12292 /// If we are extracting two 128-bit halves of a vector and shuffling the
12293 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12294 /// multi-shuffle lowering.
12295 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12296                                              SDValue N1, ArrayRef<int> Mask,
12297                                              SelectionDAG &DAG) {
12298   MVT VT = N0.getSimpleValueType();
12299   assert((VT.is128BitVector() &&
12300           (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12301          "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12302 
12303   // Check that both sources are extracts of the same source vector.
12304   if (N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12305       N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12306       N0.getOperand(0) != N1.getOperand(0) ||
12307       !N0.hasOneUse() || !N1.hasOneUse())
12308     return SDValue();
12309 
12310   SDValue WideVec = N0.getOperand(0);
12311   MVT WideVT = WideVec.getSimpleValueType();
12312   if (!WideVT.is256BitVector())
12313     return SDValue();
12314 
12315   // Match extracts of each half of the wide source vector. Commute the shuffle
12316   // if the extract of the low half is N1.
12317   unsigned NumElts = VT.getVectorNumElements();
12318   SmallVector<int, 4> NewMask(Mask);
12319   const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12320   const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12321   if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12322     ShuffleVectorSDNode::commuteMask(NewMask);
12323   else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12324     return SDValue();
12325 
12326   // Final bailout: if the mask is simple, we are better off using an extract
12327   // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12328   // because that avoids a constant load from memory.
12329   if (NumElts == 4 &&
12330       (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
12331     return SDValue();
12332 
12333   // Extend the shuffle mask with undef elements.
12334   NewMask.append(NumElts, -1);
12335 
12336   // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12337   SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12338                                       NewMask);
12339   // This is free: ymm -> xmm.
12340   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12341                      DAG.getIntPtrConstant(0, DL));
12342 }
12343 
12344 /// Try to lower broadcast of a single element.
12345 ///
12346 /// For convenience, this code also bundles all of the subtarget feature set
12347 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12348 /// a convenient way to factor it out.
12349 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12350                                        SDValue V2, ArrayRef<int> Mask,
12351                                        const X86Subtarget &Subtarget,
12352                                        SelectionDAG &DAG) {
12353   MVT EltVT = VT.getVectorElementType();
12354   if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12355         (Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
12356         (Subtarget.hasAVX2() && (VT.isInteger() || EltVT == MVT::f16))))
12357     return SDValue();
12358 
12359   // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12360   // we can only broadcast from a register with AVX2.
12361   unsigned NumEltBits = VT.getScalarSizeInBits();
12362   unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12363                         ? X86ISD::MOVDDUP
12364                         : X86ISD::VBROADCAST;
12365   bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12366 
12367   // Check that the mask is a broadcast.
12368   int BroadcastIdx = getSplatIndex(Mask);
12369   if (BroadcastIdx < 0)
12370     return SDValue();
12371   assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12372                                             "a sorted mask where the broadcast "
12373                                             "comes from V1.");
12374 
12375   // Go up the chain of (vector) values to find a scalar load that we can
12376   // combine with the broadcast.
12377   // TODO: Combine this logic with findEltLoadSrc() used by
12378   //       EltsFromConsecutiveLoads().
12379   int BitOffset = BroadcastIdx * NumEltBits;
12380   SDValue V = V1;
12381   for (;;) {
12382     switch (V.getOpcode()) {
12383     case ISD::BITCAST: {
12384       V = V.getOperand(0);
12385       continue;
12386     }
12387     case ISD::CONCAT_VECTORS: {
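            // Step into whichever concatenated operand contains the broadcast bits
            // and make the bit offset relative to that operand.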
12388       int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12389       int OpIdx = BitOffset / OpBitWidth;
12390       V = V.getOperand(OpIdx);
12391       BitOffset %= OpBitWidth;
12392       continue;
12393     }
12394     case ISD::EXTRACT_SUBVECTOR: {
12395       // The extraction index adds to the existing offset.
12396       unsigned EltBitWidth = V.getScalarValueSizeInBits();
12397       unsigned Idx = V.getConstantOperandVal(1);
12398       unsigned BeginOffset = Idx * EltBitWidth;
12399       BitOffset += BeginOffset;
12400       V = V.getOperand(0);
12401       continue;
12402     }
12403     case ISD::INSERT_SUBVECTOR: {
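            // If the broadcast bits fall inside the inserted subvector, continue the
            // walk there; otherwise keep following the outer vector.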
12404       SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12405       int EltBitWidth = VOuter.getScalarValueSizeInBits();
12406       int Idx = (int)V.getConstantOperandVal(2);
12407       int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12408       int BeginOffset = Idx * EltBitWidth;
12409       int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12410       if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12411         BitOffset -= BeginOffset;
12412         V = VInner;
12413       } else {
12414         V = VOuter;
12415       }
12416       continue;
12417     }
12418     }
12419     break;
12420   }
12421   assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12422   BroadcastIdx = BitOffset / NumEltBits;
12423 
12424   // Do we need to bitcast the source to retrieve the original broadcast index?
12425   bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12426 
12427   // Check if this is a broadcast of a scalar. We special case lowering
12428   // for scalars so that we can more effectively fold with loads.
12429   // If the original value has a larger element type than the shuffle, the
12430   // broadcast element is in essence truncated. Make that explicit to ease
12431   // folding.
12432   if (BitCastSrc && VT.isInteger())
12433     if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12434             DL, VT, V, BroadcastIdx, Subtarget, DAG))
12435       return TruncBroadcast;
12436 
12437   // Also check the simpler case, where we can directly reuse the scalar.
12438   if (!BitCastSrc &&
12439       ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12440        (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12441     V = V.getOperand(BroadcastIdx);
12442 
12443     // If we can't broadcast from a register, check that the input is a load.
12444     if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12445       return SDValue();
12446   } else if (ISD::isNormalLoad(V.getNode()) &&
12447              cast<LoadSDNode>(V)->isSimple()) {
12448     // We do not check for one-use of the vector load because a broadcast load
12449     // is expected to be a win for code size, register pressure, and possibly
12450     // uops even if the original vector load is not eliminated.
12451 
12452     // Reduce the vector load and shuffle to a broadcasted scalar load.
12453     LoadSDNode *Ld = cast<LoadSDNode>(V);
12454     SDValue BaseAddr = Ld->getOperand(1);
12455     MVT SVT = VT.getScalarType();
12456     unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12457     assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12458     SDValue NewAddr =
12459         DAG.getMemBasePlusOffset(BaseAddr, TypeSize::getFixed(Offset), DL);
12460 
12461     // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
12462     // than MOVDDUP.
12463     // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
12464     if (Opcode == X86ISD::VBROADCAST) {
12465       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
12466       SDValue Ops[] = {Ld->getChain(), NewAddr};
12467       V = DAG.getMemIntrinsicNode(
12468           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
12469           DAG.getMachineFunction().getMachineMemOperand(
12470               Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12471       DAG.makeEquivalentMemoryOrdering(Ld, V);
12472       return DAG.getBitcast(VT, V);
12473     }
12474     assert(SVT == MVT::f64 && "Unexpected VT!");
12475     V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12476                     DAG.getMachineFunction().getMachineMemOperand(
12477                         Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12478     DAG.makeEquivalentMemoryOrdering(Ld, V);
12479   } else if (!BroadcastFromReg) {
12480     // We can't broadcast from a vector register.
12481     return SDValue();
12482   } else if (BitOffset != 0) {
12483     // We can only broadcast from the zero-element of a vector register,
12484     // but it can be advantageous to broadcast from the zero-element of a
12485     // subvector.
12486     if (!VT.is256BitVector() && !VT.is512BitVector())
12487       return SDValue();
12488 
12489     // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12490     if (VT == MVT::v4f64 || VT == MVT::v4i64)
12491       return SDValue();
12492 
12493     // Only broadcast the zero-element of a 128-bit subvector.
12494     if ((BitOffset % 128) != 0)
12495       return SDValue();
12496 
12497     assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12498            "Unexpected bit-offset");
12499     assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12500            "Unexpected vector size");
12501     unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12502     V = extract128BitVector(V, ExtractIdx, DAG, DL);
12503   }
12504 
12505   // On AVX we can use VBROADCAST directly for scalar sources.
12506   if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
12507     V = DAG.getBitcast(MVT::f64, V);
12508     if (Subtarget.hasAVX()) {
12509       V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
12510       return DAG.getBitcast(VT, V);
12511     }
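          // Without AVX, wrap the scalar in a v2f64 so the MOVDDUP emitted below
          // duplicates lane 0.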
12512     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
12513   }
12514 
12515   // If this is a scalar, do the broadcast on this type and bitcast.
12516   if (!V.getValueType().isVector()) {
12517     assert(V.getScalarValueSizeInBits() == NumEltBits &&
12518            "Unexpected scalar size");
12519     MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
12520                                        VT.getVectorNumElements());
12521     return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12522   }
12523 
12524   // We only support broadcasting from 128-bit vectors to minimize the
12525   // number of patterns we need to deal with in isel. So extract down to
12526   // 128-bits, removing as many bitcasts as possible.
12527   if (V.getValueSizeInBits() > 128)
12528     V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12529 
12530   // Otherwise cast V to a vector with the same element type as VT, but
12531   // possibly narrower than VT. Then perform the broadcast.
12532   unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12533   MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
12534   return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
12535 }
12536 
12537 // Check for whether we can use INSERTPS to perform the shuffle. We only use
12538 // INSERTPS when the V1 elements are already in the correct locations
12539 // because otherwise we can just always use two SHUFPS instructions which
12540 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12541 // perform INSERTPS if a single V1 element is out of place and all V2
12542 // elements are zeroable.
12543 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12544                                    unsigned &InsertPSMask,
12545                                    const APInt &Zeroable,
12546                                    ArrayRef<int> Mask, SelectionDAG &DAG) {
12547   assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12548   assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12549   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12550 
12551   // Attempt to match INSERTPS with one element from VA or VB being
12552   // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12553   // are updated.
12554   auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12555                              ArrayRef<int> CandidateMask) {
12556     unsigned ZMask = 0;
12557     int VADstIndex = -1;
12558     int VBDstIndex = -1;
12559     bool VAUsedInPlace = false;
12560 
12561     for (int i = 0; i < 4; ++i) {
12562       // Synthesize a zero mask from the zeroable elements (includes undefs).
12563       if (Zeroable[i]) {
12564         ZMask |= 1 << i;
12565         continue;
12566       }
12567 
12568       // Flag if we use any VA inputs in place.
12569       if (i == CandidateMask[i]) {
12570         VAUsedInPlace = true;
12571         continue;
12572       }
12573 
12574       // We can only insert a single non-zeroable element.
12575       if (VADstIndex >= 0 || VBDstIndex >= 0)
12576         return false;
12577 
12578       if (CandidateMask[i] < 4) {
12579         // VA input out of place for insertion.
12580         VADstIndex = i;
12581       } else {
12582         // VB input for insertion.
12583         VBDstIndex = i;
12584       }
12585     }
12586 
12587     // Don't bother if we have no (non-zeroable) element for insertion.
12588     if (VADstIndex < 0 && VBDstIndex < 0)
12589       return false;
12590 
12591     // Determine element insertion src/dst indices. The src index is from the
12592     // start of the inserted vector, not the start of the concatenated vector.
12593     unsigned VBSrcIndex = 0;
12594     if (VADstIndex >= 0) {
12595       // If we have a VA input out of place, we use VA as the V2 element
12596       // insertion and don't use the original V2 at all.
12597       VBSrcIndex = CandidateMask[VADstIndex];
12598       VBDstIndex = VADstIndex;
12599       VB = VA;
12600     } else {
12601       VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12602     }
12603 
12604     // If no V1 inputs are used in place, then the result is created only from
12605     // the zero mask and the V2 insertion - so remove V1 dependency.
12606     if (!VAUsedInPlace)
12607       VA = DAG.getUNDEF(MVT::v4f32);
12608 
12609     // Update V1, V2 and InsertPSMask accordingly.
12610     V1 = VA;
12611     V2 = VB;
12612 
12613     // Insert the V2 element into the desired position.
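          // INSERTPS immediate: bits [7:6] select the source element, bits [5:4] the
          // destination slot, and bits [3:0] form the zero mask.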
12614     InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12615     assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12616     return true;
12617   };
12618 
12619   if (matchAsInsertPS(V1, V2, Mask))
12620     return true;
12621 
12622   // Commute and try again.
12623   SmallVector<int, 4> CommutedMask(Mask);
12624   ShuffleVectorSDNode::commuteMask(CommutedMask);
12625   if (matchAsInsertPS(V2, V1, CommutedMask))
12626     return true;
12627 
12628   return false;
12629 }
12630 
12631 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12632                                       ArrayRef<int> Mask, const APInt &Zeroable,
12633                                       SelectionDAG &DAG) {
12634   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12635   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12636 
12637   // Attempt to match the insertps pattern.
12638   unsigned InsertPSMask = 0;
12639   if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12640     return SDValue();
12641 
12642   // Insert the V2 element into the desired position.
12643   return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12644                      DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12645 }
12646 
12647 /// Handle lowering of 2-lane 64-bit floating point shuffles.
12648 ///
12649 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
12650 /// support for floating point shuffles but not integer shuffles. These
12651 /// instructions will incur a domain crossing penalty on some chips though so
12652 /// it is better to avoid lowering through this for integer vectors where
12653 /// possible.
12654 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12655                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12656                                  const X86Subtarget &Subtarget,
12657                                  SelectionDAG &DAG) {
12658   assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12659   assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12660   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12661 
12662   if (V2.isUndef()) {
12663     // Check for being able to broadcast a single element.
12664     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12665                                                     Mask, Subtarget, DAG))
12666       return Broadcast;
12667 
12668     // Straight shuffle of a single input vector. Simulate this by using the
12669     // single input as both of the "inputs" to this instruction.
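          // Unary SHUFPD immediate: bit 0 picks the source element for lane 0 and
          // bit 1 picks the source element for lane 1.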
12670     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
12671 
12672     if (Subtarget.hasAVX()) {
12673       // If we have AVX, we can use VPERMILPD which will allow folding a load
12674       // into the shuffle.
12675       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12676                          DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12677     }
12678 
12679     return DAG.getNode(
12680         X86ISD::SHUFP, DL, MVT::v2f64,
12681         Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12682         Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12683         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12684   }
12685   assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12686   assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12687   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12688   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12689 
12690   if (Subtarget.hasAVX2())
12691     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12692       return Extract;
12693 
12694   // When loading a scalar and then shuffling it into a vector we can often do
12695   // the insertion cheaply.
12696   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12697           DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12698     return Insertion;
12699   // Try inverting the insertion since for v2 masks it is easy to do and we
12700   // can't reliably sort the mask one way or the other.
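        // XOR with 2 swaps references between V1 (elements 0-1) and V2 (elements 2-3).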
12701   int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12702                         Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12703   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12704           DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12705     return Insertion;
12706 
12707   // Try to use one of the special instruction patterns to handle two common
12708   // blend patterns if a zero-blend above didn't work.
12709   if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
12710       isShuffleEquivalent(Mask, {1, 3}, V1, V2))
12711     if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12712       // We can either use a special instruction to load over the low double or
12713       // to move just the low double.
12714       return DAG.getNode(
12715           X86ISD::MOVSD, DL, MVT::v2f64, V2,
12716           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12717 
12718   if (Subtarget.hasSSE41())
12719     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12720                                             Zeroable, Subtarget, DAG))
12721       return Blend;
12722 
12723   // Use dedicated unpack instructions for masks that match their pattern.
12724   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12725     return V;
12726 
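        // Two-input SHUFPD: bit 0 selects which element of V1 fills lane 0 and
        // bit 1 selects which element of V2 fills lane 1.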
12727   unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
12728   return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12729                      DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12730 }
12731 
12732 /// Handle lowering of 2-lane 64-bit integer shuffles.
12733 ///
12734 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12735 /// the integer unit to minimize domain crossing penalties. However, for blends
12736 /// it falls back to the floating point shuffle operation with appropriate bit
12737 /// casting.
12738 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12739                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12740                                  const X86Subtarget &Subtarget,
12741                                  SelectionDAG &DAG) {
12742   assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12743   assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12744   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12745 
12746   if (V2.isUndef()) {
12747     // Check for being able to broadcast a single element.
12748     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12749                                                     Mask, Subtarget, DAG))
12750       return Broadcast;
12751 
12752     // Straight shuffle of a single input vector. For everything from SSE2
12753     // onward this has a single fast instruction with no scary immediates.
12754     // We have to map the mask as it is actually a v4i32 shuffle instruction.
12755     V1 = DAG.getBitcast(MVT::v4i32, V1);
12756     int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
12757                           Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
12758                           Mask[1] < 0 ? -1 : (Mask[1] * 2),
12759                           Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
12760     return DAG.getBitcast(
12761         MVT::v2i64,
12762         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12763                     getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12764   }
12765   assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
12766   assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
12767   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12768   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12769 
12770   if (Subtarget.hasAVX2())
12771     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12772       return Extract;
12773 
12774   // Try to use shift instructions.
12775   if (SDValue Shift =
12776           lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget,
12777                               DAG, /*BitwiseOnly*/ false))
12778     return Shift;
12779 
12780   // When loading a scalar and then shuffling it into a vector we can often do
12781   // the insertion cheaply.
12782   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12783           DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12784     return Insertion;
12785   // Try inverting the insertion since for v2 masks it is easy to do and we
12786   // can't reliably sort the mask one way or the other.
12787   int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
12788   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12789           DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12790     return Insertion;
12791 
12792   // We have different paths for blend lowering, but they all must use the
12793   // *exact* same predicate.
12794   bool IsBlendSupported = Subtarget.hasSSE41();
12795   if (IsBlendSupported)
12796     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
12797                                             Zeroable, Subtarget, DAG))
12798       return Blend;
12799 
12800   // Use dedicated unpack instructions for masks that match their pattern.
12801   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
12802     return V;
12803 
12804   // Try to use byte rotation instructions.
12805   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
12806   if (Subtarget.hasSSSE3()) {
12807     if (Subtarget.hasVLX())
12808       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
12809                                                 Zeroable, Subtarget, DAG))
12810         return Rotate;
12811 
12812     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
12813                                                   Subtarget, DAG))
12814       return Rotate;
12815   }
12816 
12817   // If we have direct support for blends, we should lower by decomposing into
12818   // a permute. That will be faster than the domain cross.
12819   if (IsBlendSupported)
12820     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
12821                                                 Subtarget, DAG);
12822 
12823   // We implement this with SHUFPD which is pretty lame because it will likely
12824   // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
12825   // However, all the alternatives are still more cycles and newer chips don't
12826   // have this problem. It would be really nice if x86 had better shuffles here.
12827   V1 = DAG.getBitcast(MVT::v2f64, V1);
12828   V2 = DAG.getBitcast(MVT::v2f64, V2);
12829   return DAG.getBitcast(MVT::v2i64,
12830                         DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
12831 }
12832 
12833 /// Lower a vector shuffle using the SHUFPS instruction.
12834 ///
12835 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
12836 /// It makes no assumptions about whether this is the *best* lowering; it simply
12837 /// uses it.
12838 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
12839                                       ArrayRef<int> Mask, SDValue V1,
12840                                       SDValue V2, SelectionDAG &DAG) {
12841   SDValue LowV = V1, HighV = V2;
12842   SmallVector<int, 4> NewMask(Mask);
12843   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12844 
12845   if (NumV2Elements == 1) {
12846     int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
12847 
12848     // Compute the index adjacent to V2Index and in the same half by toggling
12849     // the low bit.
12850     int V2AdjIndex = V2Index ^ 1;
12851 
12852     if (Mask[V2AdjIndex] < 0) {
12853       // Handles all the cases where we have a single V2 element and an undef.
12854       // This will only ever happen in the high lanes because we commute the
12855       // vector otherwise.
12856       if (V2Index < 2)
12857         std::swap(LowV, HighV);
12858       NewMask[V2Index] -= 4;
12859     } else {
12860       // Handle the case where the V2 element ends up adjacent to a V1 element.
12861       // To make this work, blend them together as the first step.
12862       int V1Index = V2AdjIndex;
12863       int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
12864       V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
12865                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12866 
12867       // Now proceed to reconstruct the final blend as we have the necessary
12868       // high or low half formed.
12869       if (V2Index < 2) {
12870         LowV = V2;
12871         HighV = V1;
12872       } else {
12873         HighV = V2;
12874       }
12875       NewMask[V1Index] = 2; // We put the V1 element in V2[2].
12876       NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
12877     }
12878   } else if (NumV2Elements == 2) {
12879     if (Mask[0] < 4 && Mask[1] < 4) {
12880       // Handle the easy case where we have V1 in the low lanes and V2 in the
12881       // high lanes.
12882       NewMask[2] -= 4;
12883       NewMask[3] -= 4;
12884     } else if (Mask[2] < 4 && Mask[3] < 4) {
12885       // We also handle the reversed case because this utility may get called
12886       // when we detect a SHUFPS pattern but can't easily commute the shuffle to
12887       // arrange things in the right direction.
12888       NewMask[0] -= 4;
12889       NewMask[1] -= 4;
12890       HighV = V1;
12891       LowV = V2;
12892     } else {
12893       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
12894       // trying to place elements directly, just blend them and set up the final
12895       // shuffle to place them.
12896 
12897       // The first two blend mask elements are for V1, the second two are for
12898       // V2.
12899       int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
12900                           Mask[2] < 4 ? Mask[2] : Mask[3],
12901                           (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
12902                           (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
12903       V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12904                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12905 
12906       // Now we do a normal shuffle of V1 by giving V1 as both operands to
12907       // a blend.
12908       LowV = HighV = V1;
12909       NewMask[0] = Mask[0] < 4 ? 0 : 2;
12910       NewMask[1] = Mask[0] < 4 ? 2 : 0;
12911       NewMask[2] = Mask[2] < 4 ? 1 : 3;
12912       NewMask[3] = Mask[2] < 4 ? 3 : 1;
12913     }
12914   } else if (NumV2Elements == 3) {
12915     // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
12916     // we can get here via other paths (e.g. repeated mask matching) where we
12917     // don't want to do another round of lowerVECTOR_SHUFFLE.
12918     ShuffleVectorSDNode::commuteMask(NewMask);
12919     return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
12920   }
12921   return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
12922                      getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
12923 }
12924 
12925 /// Lower 4-lane 32-bit floating point shuffles.
12926 ///
12927 /// Uses instructions exclusively from the floating point unit to minimize
12928 /// domain crossing penalties, as these are sufficient to implement all v4f32
12929 /// shuffles.
12930 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12931                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12932                                  const X86Subtarget &Subtarget,
12933                                  SelectionDAG &DAG) {
12934   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12935   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12936   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12937 
12938   if (Subtarget.hasSSE41())
12939     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
12940                                             Zeroable, Subtarget, DAG))
12941       return Blend;
12942 
12943   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12944 
12945   if (NumV2Elements == 0) {
12946     // Check for being able to broadcast a single element.
12947     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
12948                                                     Mask, Subtarget, DAG))
12949       return Broadcast;
12950 
12951     // Use even/odd duplicate instructions for masks that match their pattern.
12952     if (Subtarget.hasSSE3()) {
12953       if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
12954         return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
12955       if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
12956         return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
12957     }
12958 
12959     if (Subtarget.hasAVX()) {
12960       // If we have AVX, we can use VPERMILPS which will allow folding a load
12961       // into the shuffle.
12962       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
12963                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12964     }
12965 
12966     // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
12967     // in SSE1 because otherwise they are widened to v2f64 and never get here.
12968     if (!Subtarget.hasSSE2()) {
12969       if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
12970         return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
12971       if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
12972         return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
12973     }
12974 
12975     // Otherwise, use a straight shuffle of a single input vector. We pass the
12976     // input vector to both operands to simulate this with a SHUFPS.
12977     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
12978                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12979   }
12980 
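        // Match zero/any-extension in the integer domain and bitcast the result
        // back to v4f32.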
12981   if (Subtarget.hasSSE2())
12982     if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
12983             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) {
12984       ZExt = DAG.getBitcast(MVT::v4f32, ZExt);
12985       return ZExt;
12986     }
12987 
12988   if (Subtarget.hasAVX2())
12989     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12990       return Extract;
12991 
12992   // There are special ways we can lower some single-element blends. However, we
12993   // have custom ways we can lower more complex single-element blends below that
12994   // we defer to if both this and BLENDPS fail to match, so restrict this to
12995   // when the V2 input is targeting element 0 of the mask -- that is the fast
12996   // case here.
12997   if (NumV2Elements == 1 && Mask[0] >= 4)
12998     if (SDValue V = lowerShuffleAsElementInsertion(
12999             DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13000       return V;
13001 
13002   if (Subtarget.hasSSE41()) {
13003     // Use INSERTPS if we can complete the shuffle efficiently.
13004     if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13005       return V;
13006 
13007     if (!isSingleSHUFPSMask(Mask))
13008       if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13009                                                             V2, Mask, DAG))
13010         return BlendPerm;
13011   }
13012 
13013   // Use low/high mov instructions. These are only valid in SSE1 because
13014   // otherwise they are widened to v2f64 and never get here.
13015   if (!Subtarget.hasSSE2()) {
13016     if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
13017       return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13018     if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
13019       return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13020   }
13021 
13022   // Use dedicated unpack instructions for masks that match their pattern.
13023   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13024     return V;
13025 
13026   // Otherwise fall back to a SHUFPS lowering strategy.
13027   return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13028 }
13029 
13030 /// Lower 4-lane i32 vector shuffles.
13031 ///
13032 /// We try to handle these with integer-domain shuffles where we can, but for
13033 /// blends we use the floating point domain blend instructions.
13034 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13035                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13036                                  const X86Subtarget &Subtarget,
13037                                  SelectionDAG &DAG) {
13038   assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13039   assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13040   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13041 
13042   // Whenever we can lower this as a zext, that instruction is strictly faster
13043   // than any alternative. It also allows us to fold memory operands into the
13044   // shuffle in many cases.
13045   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13046                                                    Zeroable, Subtarget, DAG))
13047     return ZExt;
13048 
13049   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13050 
13051   // Try to use shift instructions if fast.
13052   if (Subtarget.preferLowerShuffleAsShift()) {
13053     if (SDValue Shift =
13054             lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable,
13055                                 Subtarget, DAG, /*BitwiseOnly*/ true))
13056       return Shift;
13057     if (NumV2Elements == 0)
13058       if (SDValue Rotate =
13059               lowerShuffleAsBitRotate(DL, MVT::v4i32, V1, Mask, Subtarget, DAG))
13060         return Rotate;
13061   }
13062 
13063   if (NumV2Elements == 0) {
13064     // Try to use broadcast unless the mask only has one non-undef element.
13065     if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13066       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13067                                                       Mask, Subtarget, DAG))
13068         return Broadcast;
13069     }
13070 
13071     // Straight shuffle of a single input vector. For everything from SSE2
13072     // onward this has a single fast instruction with no scary immediates.
13073     // We coerce the shuffle pattern to be compatible with UNPCK instructions
13074     // but we aren't actually going to use the UNPCK instruction because doing
13075     // so prevents folding a load into this instruction or making a copy.
13076     const int UnpackLoMask[] = {0, 0, 1, 1};
13077     const int UnpackHiMask[] = {2, 2, 3, 3};
13078     if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
13079       Mask = UnpackLoMask;
13080     else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
13081       Mask = UnpackHiMask;
13082 
13083     return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13084                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13085   }
13086 
13087   if (Subtarget.hasAVX2())
13088     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13089       return Extract;
13090 
13091   // Try to use shift instructions.
13092   if (SDValue Shift =
13093           lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget,
13094                               DAG, /*BitwiseOnly*/ false))
13095     return Shift;
13096 
13097   // There are special ways we can lower some single-element blends.
13098   if (NumV2Elements == 1)
13099     if (SDValue V = lowerShuffleAsElementInsertion(
13100             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13101       return V;
13102 
13103   // We have different paths for blend lowering, but they all must use the
13104   // *exact* same predicate.
13105   bool IsBlendSupported = Subtarget.hasSSE41();
13106   if (IsBlendSupported)
13107     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13108                                             Zeroable, Subtarget, DAG))
13109       return Blend;
13110 
13111   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13112                                              Zeroable, Subtarget, DAG))
13113     return Masked;
13114 
13115   // Use dedicated unpack instructions for masks that match their pattern.
13116   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13117     return V;
13118 
13119   // Try to use byte rotation instructions.
13120   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13121   if (Subtarget.hasSSSE3()) {
13122     if (Subtarget.hasVLX())
13123       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
13124                                                 Zeroable, Subtarget, DAG))
13125         return Rotate;
13126 
13127     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13128                                                   Subtarget, DAG))
13129       return Rotate;
13130   }
13131 
13132   // Assume that a single SHUFPS is faster than an alternative sequence of
13133   // multiple instructions (even if the CPU has a domain penalty).
13134   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13135   if (!isSingleSHUFPSMask(Mask)) {
13136     // If we have direct support for blends, we should lower by decomposing into
13137     // a permute. That will be faster than the domain cross.
13138     if (IsBlendSupported)
13139       return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
13140                                                   Subtarget, DAG);
13141 
13142     // Try to lower by permuting the inputs into an unpack instruction.
13143     if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13144                                                         Mask, Subtarget, DAG))
13145       return Unpack;
13146   }
13147 
13148   // We implement this with SHUFPS because it can blend from two vectors.
13149   // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13150   // up the inputs, bypassing domain shift penalties that we would incur if we
13151   // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13152   // relevant.
13153   SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13154   SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13155   SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13156   return DAG.getBitcast(MVT::v4i32, ShufPS);
13157 }
13158 
13159 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13160 /// shuffle lowering, and the most complex part.
13161 ///
13162 /// The lowering strategy is to try to form pairs of input lanes which are
13163 /// targeted at the same half of the final vector, and then use a dword shuffle
13164 /// to place them onto the right half, and finally unpack the paired lanes into
13165 /// their final position.
13166 ///
13167 /// The exact breakdown of how to form these dword pairs and align them on the
13168 /// correct sides is really tricky. See the comments within the function for
13169 /// more of the details.
13170 ///
13171 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13172 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13173 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13174 /// vector, form the analogous 128-bit 8-element Mask.
13175 static SDValue lowerV8I16GeneralSingleInputShuffle(
13176     const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13177     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13178   assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13179   MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13180 
13181   assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13182   MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13183   MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13184 
13185   // Attempt to directly match PSHUFLW or PSHUFHW.
13186   if (isUndefOrInRange(LoMask, 0, 4) &&
13187       isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13188     return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13189                        getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13190   }
13191   if (isUndefOrInRange(HiMask, 4, 8) &&
13192       isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13193     for (int i = 0; i != 4; ++i)
13194       HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13195     return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13196                        getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13197   }
13198 
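        // Partition the mask's inputs by source half (values 0-3 come from the low
        // half, 4-7 from the high half) and by which half of the result they feed,
        // e.g. HToLInputs are high-half sources targeting the low half of the result.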
13199   SmallVector<int, 4> LoInputs;
13200   copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13201   array_pod_sort(LoInputs.begin(), LoInputs.end());
13202   LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13203   SmallVector<int, 4> HiInputs;
13204   copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13205   array_pod_sort(HiInputs.begin(), HiInputs.end());
13206   HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13207   int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13208   int NumHToL = LoInputs.size() - NumLToL;
13209   int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13210   int NumHToH = HiInputs.size() - NumLToH;
13211   MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13212   MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13213   MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13214   MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13215 
13216   // If we are shuffling values from one half, check how many different DWORD
13217   // pairs we need to create. If only 1 or 2 then we can perform this as a
13218   // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13219   auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13220                                ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13221     V = DAG.getNode(ShufWOp, DL, VT, V,
13222                     getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13223     V = DAG.getBitcast(PSHUFDVT, V);
13224     V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13225                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13226     return DAG.getBitcast(VT, V);
13227   };
13228 
13229   if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13230     int PSHUFDMask[4] = { -1, -1, -1, -1 };
13231     SmallVector<std::pair<int, int>, 4> DWordPairs;
13232     int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13233 
13234     // Collect the different DWORD pairs.
13235     for (int DWord = 0; DWord != 4; ++DWord) {
13236       int M0 = Mask[2 * DWord + 0];
13237       int M1 = Mask[2 * DWord + 1];
13238       M0 = (M0 >= 0 ? M0 % 4 : M0);
13239       M1 = (M1 >= 0 ? M1 % 4 : M1);
13240       if (M0 < 0 && M1 < 0)
13241         continue;
13242 
13243       bool Match = false;
13244       for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13245         auto &DWordPair = DWordPairs[j];
13246         if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13247             (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13248           DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13249           DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13250           PSHUFDMask[DWord] = DOffset + j;
13251           Match = true;
13252           break;
13253         }
13254       }
13255       if (!Match) {
13256         PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13257         DWordPairs.push_back(std::make_pair(M0, M1));
13258       }
13259     }
13260 
13261     if (DWordPairs.size() <= 2) {
13262       DWordPairs.resize(2, std::make_pair(-1, -1));
13263       int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13264                               DWordPairs[1].first, DWordPairs[1].second};
13265       if ((NumHToL + NumHToH) == 0)
13266         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13267       if ((NumLToL + NumLToH) == 0)
13268         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13269     }
13270   }
13271 
13272   // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13273   // such inputs we can swap two of the dwords across the half mark and end up
13274   // with <=2 inputs to each half in each half. Once there, we can fall through
13275   // to the generic code below. For example:
13276   //
13277   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13278   // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13279   //
13280   // However, in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13281   // and an existing 2-into-2 on the other half. In this case we may have to
13282   // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13283   // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13284   // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13285   // because any other situation (including a 3-into-1 or 1-into-3 in the half
13286   // other than the one we target for fixing) will be fixed when we re-enter this
13287   // path. Any sequence of PSHUFD instructions that results will also be
13288   // combined into a single instruction. Here is an example of the tricky case:
13289   //
13290   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13291   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13292   //
13293   // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13294   //
13295   // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13296   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13297   //
13298   // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13299   // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13300   //
13301   // The result is fine to be handled by the generic logic.
13302   auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13303                           ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13304                           int AOffset, int BOffset) {
13305     assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13306            "Must call this with A having 3 or 1 inputs from the A half.");
13307     assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13308            "Must call this with B having 1 or 3 inputs from the B half.");
13309     assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13310            "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13311 
13312     bool ThreeAInputs = AToAInputs.size() == 3;
13313 
13314     // Compute the index of dword with only one word among the three inputs in
13315     // a half by taking the sum of the half with three inputs and subtracting
13316     // the sum of the actual three inputs. The difference is the remaining
13317     // slot.
13318     int ADWord = 0, BDWord = 0;
13319     int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13320     int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13321     int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13322     ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13323     int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13324     int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13325     int TripleNonInputIdx =
13326         TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13327     TripleDWord = TripleNonInputIdx / 2;
13328 
13329     // We use xor with one to compute the adjacent DWord to whichever one the
13330     // OneInput is in.
13331     OneInputDWord = (OneInput / 2) ^ 1;
13332 
13333     // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13334     // and BToA inputs. If there is also such a problem with the BToB and AToB
13335     // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13336     // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13337     // is essential that we don't *create* a 3<-1 as then we might oscillate.
13338     if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13339       // Compute how many inputs will be flipped by swapping these DWords. We
13340       // need to balance this to ensure we don't form a 3-1 shuffle in the
13341       // other half.
13343       int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
13344                                  llvm::count(AToBInputs, 2 * ADWord + 1);
13345       int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
13346                                  llvm::count(BToBInputs, 2 * BDWord + 1);
13347       if ((NumFlippedAToBInputs == 1 &&
13348            (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13349           (NumFlippedBToBInputs == 1 &&
13350            (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13351         // We choose whether to fix the A half or B half based on whether that
13352         // half has zero flipped inputs. At zero, we may not be able to fix it
13353         // with that half. We also bias towards fixing the B half because that
13354         // will more commonly be the high half, and we have to bias one way.
13355         auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13356                                                        ArrayRef<int> Inputs) {
13357           int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13358           bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13359           // Determine whether the free index is in the flipped dword or the
13360           // unflipped dword based on where the pinned index is. We use this bit
13361           // in an xor to conditionally select the adjacent dword.
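                // (PinnedIdx / 2 == DWord) is 1 exactly when the pinned word
                // already lives in DWord, in which case we start from the
                // adjacent dword instead.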
13362           int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13363           bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13364           if (IsFixIdxInput == IsFixFreeIdxInput)
13365             FixFreeIdx += 1;
13366           IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13367           assert(IsFixIdxInput != IsFixFreeIdxInput &&
13368                  "We need to be changing the number of flipped inputs!");
13369           int PSHUFHalfMask[] = {0, 1, 2, 3};
13370           std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13371           V = DAG.getNode(
13372               FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13373               MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13374               getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13375 
13376           for (int &M : Mask)
13377             if (M >= 0 && M == FixIdx)
13378               M = FixFreeIdx;
13379             else if (M >= 0 && M == FixFreeIdx)
13380               M = FixIdx;
13381         };
13382         if (NumFlippedBToBInputs != 0) {
13383           int BPinnedIdx =
13384               BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13385           FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13386         } else {
13387           assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13388           int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13389           FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13390         }
13391       }
13392     }
13393 
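          // Swap the chosen dword from each half with a single PSHUFD so that,
          // once the mask is remapped below, the recursion no longer sees a
          // 3:1 split between the halves.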
13394     int PSHUFDMask[] = {0, 1, 2, 3};
13395     PSHUFDMask[ADWord] = BDWord;
13396     PSHUFDMask[BDWord] = ADWord;
13397     V = DAG.getBitcast(
13398         VT,
13399         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13400                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13401 
13402     // Adjust the mask to match the new locations of A and B.
13403     for (int &M : Mask)
13404       if (M >= 0 && M/2 == ADWord)
13405         M = 2 * BDWord + M % 2;
13406       else if (M >= 0 && M/2 == BDWord)
13407         M = 2 * ADWord + M % 2;
13408 
13409     // Recurse back into this routine to re-compute state now that this isn't
13410     // a 3 and 1 problem.
13411     return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13412   };
13413   if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13414     return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13415   if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13416     return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13417 
13418   // At this point there are at most two inputs to the low and high halves from
13419   // each half. That means the inputs can always be grouped into dwords and
13420   // those dwords can then be moved to the correct half with a dword shuffle.
13421   // We use at most one low and one high word shuffle to collect these paired
13422   // inputs into dwords, and finally a dword shuffle to place them.
13423   int PSHUFLMask[4] = {-1, -1, -1, -1};
13424   int PSHUFHMask[4] = {-1, -1, -1, -1};
13425   int PSHUFDMask[4] = {-1, -1, -1, -1};
13426 
13427   // First fix the masks for all the inputs that are staying in their
13428   // original halves. This will then dictate the targets of the cross-half
13429   // shuffles.
13430   auto fixInPlaceInputs =
13431       [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13432                     MutableArrayRef<int> SourceHalfMask,
13433                     MutableArrayRef<int> HalfMask, int HalfOffset) {
13434     if (InPlaceInputs.empty())
13435       return;
13436     if (InPlaceInputs.size() == 1) {
13437       SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13438           InPlaceInputs[0] - HalfOffset;
13439       PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13440       return;
13441     }
13442     if (IncomingInputs.empty()) {
13443       // Just fix all of the in place inputs.
13444       for (int Input : InPlaceInputs) {
13445         SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13446         PSHUFDMask[Input / 2] = Input / 2;
13447       }
13448       return;
13449     }
13450 
13451     assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13452     SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13453         InPlaceInputs[0] - HalfOffset;
13454     // Put the second input next to the first so that they are packed into
13455     // a dword. We find the adjacent index by toggling the low bit.
13456     int AdjIndex = InPlaceInputs[0] ^ 1;
13457     SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13458     std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13459     PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13460   };
13461   fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13462   fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13463 
13464   // Now gather the cross-half inputs and place them into a free dword of
13465   // their target half.
13466   // FIXME: This operation could almost certainly be simplified dramatically to
13467   // look more like the 3-1 fixing operation.
13468   auto moveInputsToRightHalf = [&PSHUFDMask](
13469       MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13470       MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13471       MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13472       int DestOffset) {
13473     auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13474       return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13475     };
13476     auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13477                                                int Word) {
13478       int LowWord = Word & ~1;
13479       int HighWord = Word | 1;
13480       return isWordClobbered(SourceHalfMask, LowWord) ||
13481              isWordClobbered(SourceHalfMask, HighWord);
13482     };
13483 
13484     if (IncomingInputs.empty())
13485       return;
13486 
13487     if (ExistingInputs.empty()) {
13488       // Map any dwords with inputs from them into the right half.
13489       for (int Input : IncomingInputs) {
13490         // If the source half mask maps over the inputs, turn those into
13491         // swaps and use the swapped lane.
13492         if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13493           if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13494             SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13495                 Input - SourceOffset;
13496             // We have to swap the uses in our half mask in one sweep.
13497             for (int &M : HalfMask)
13498               if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13499                 M = Input;
13500               else if (M == Input)
13501                 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13502           } else {
13503             assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13504                        Input - SourceOffset &&
13505                    "Previous placement doesn't match!");
13506           }
13507           // Note that this correctly re-maps both when we do a swap and when
13508           // we observe the other side of the swap above. We rely on that to
13509           // avoid swapping the members of the input list directly.
13510           Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13511         }
13512 
13513         // Map the input's dword into the correct half.
13514         if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13515           PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13516         else
13517           assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13518                      Input / 2 &&
13519                  "Previous placement doesn't match!");
13520       }
13521 
13522       // And just directly shift any other-half mask elements to be same-half
13523       // as we will have mirrored the dword containing the element into the
13524       // same position within that half.
13525       for (int &M : HalfMask)
13526         if (M >= SourceOffset && M < SourceOffset + 4) {
13527           M = M - SourceOffset + DestOffset;
13528           assert(M >= 0 && "This should never wrap below zero!");
13529         }
13530       return;
13531     }
13532 
13533     // Ensure we have the input in a viable dword of its current half. This
13534     // is particularly tricky because the original position may be clobbered
13535     // by inputs being moved and *staying* in that half.
13536     if (IncomingInputs.size() == 1) {
13537       if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13538         int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13539                          SourceOffset;
13540         SourceHalfMask[InputFixed - SourceOffset] =
13541             IncomingInputs[0] - SourceOffset;
13542         std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13543                      InputFixed);
13544         IncomingInputs[0] = InputFixed;
13545       }
13546     } else if (IncomingInputs.size() == 2) {
13547       if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13548           isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13549         // We have two non-adjacent or clobbered inputs we need to extract from
13550         // the source half. To do this, we need to map them into some adjacent
13551         // dword slot in the source mask.
13552         int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13553                               IncomingInputs[1] - SourceOffset};
13554 
13555         // If there is a free slot in the source half mask adjacent to one of
13556         // the inputs, place the other input in it. We use (Index XOR 1) to
13557         // compute an adjacent index.
13558         if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13559             SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13560           SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13561           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13562           InputsFixed[1] = InputsFixed[0] ^ 1;
13563         } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13564                    SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13565           SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13566           SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13567           InputsFixed[0] = InputsFixed[1] ^ 1;
13568         } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13569                    SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13570           // The two inputs are in the same DWord but it is clobbered and the
13571           // adjacent DWord isn't used at all. Move both inputs to the free
13572           // slot.
13573           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13574           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13575           InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13576           InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13577         } else {
13578           // The only way we hit this point is if there is no clobbering
13579           // (because there are no off-half inputs to this half) and there is no
13580           // free slot adjacent to one of the inputs. In this case, we have to
13581           // swap an input with a non-input.
13582           for (int i = 0; i < 4; ++i)
13583             assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13584                    "We can't handle any clobbers here!");
13585           assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13586                  "Cannot have adjacent inputs here!");
13587 
13588           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13589           SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13590 
13591           // We also have to update the final source mask in this case because
13592           // it may need to undo the above swap.
13593           for (int &M : FinalSourceHalfMask)
13594             if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13595               M = InputsFixed[1] + SourceOffset;
13596             else if (M == InputsFixed[1] + SourceOffset)
13597               M = (InputsFixed[0] ^ 1) + SourceOffset;
13598 
13599           InputsFixed[1] = InputsFixed[0] ^ 1;
13600         }
13601 
13602         // Point everything at the fixed inputs.
13603         for (int &M : HalfMask)
13604           if (M == IncomingInputs[0])
13605             M = InputsFixed[0] + SourceOffset;
13606           else if (M == IncomingInputs[1])
13607             M = InputsFixed[1] + SourceOffset;
13608 
13609         IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13610         IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13611       }
13612     } else {
13613       llvm_unreachable("Unhandled input size!");
13614     }
13615 
13616     // Now hoist the DWord down to the right half.
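          // The destination half owns dwords DestOffset/2 and DestOffset/2 + 1;
          // pick whichever of the two is still unassigned in PSHUFDMask.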
13617     int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13618     assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13619     PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13620     for (int &M : HalfMask)
13621       for (int Input : IncomingInputs)
13622         if (M == Input)
13623           M = FreeDWord * 2 + Input % 2;
13624   };
13625   moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13626                         /*SourceOffset*/ 4, /*DestOffset*/ 0);
13627   moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13628                         /*SourceOffset*/ 0, /*DestOffset*/ 4);
13629 
13630   // Now enact all the shuffles we've computed to move the inputs into their
13631   // target half.
13632   if (!isNoopShuffleMask(PSHUFLMask))
13633     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13634                     getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13635   if (!isNoopShuffleMask(PSHUFHMask))
13636     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13637                     getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13638   if (!isNoopShuffleMask(PSHUFDMask))
13639     V = DAG.getBitcast(
13640         VT,
13641         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13642                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13643 
13644   // At this point, each half should contain all its inputs, and we can then
13645   // just shuffle them into their final position.
13646   assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13647          "Failed to lift all the high half inputs to the low mask!");
13648   assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13649          "Failed to lift all the low half inputs to the high mask!");
13650 
13651   // Do a half shuffle for the low mask.
13652   if (!isNoopShuffleMask(LoMask))
13653     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13654                     getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13655 
13656   // Do a half shuffle with the high mask after shifting its values down.
13657   for (int &M : HiMask)
13658     if (M >= 0)
13659       M -= 4;
13660   if (!isNoopShuffleMask(HiMask))
13661     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13662                     getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13663 
13664   return V;
13665 }
13666 
13667 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13668 /// blend if only one input is used.
13669 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13670     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13671     const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13672   assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13673          "Lane crossing shuffle masks not supported");
13674 
13675   int NumBytes = VT.getSizeInBits() / 8;
13676   int Size = Mask.size();
13677   int Scale = NumBytes / Size;
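        // Each mask element covers Scale bytes: 1 for a v16i8 mask, 2 for a
        // v8i16 mask, and so on.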
13678 
13679   SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13680   SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13681   V1InUse = false;
13682   V2InUse = false;
13683 
13684   for (int i = 0; i < NumBytes; ++i) {
13685     int M = Mask[i / Scale];
13686     if (M < 0)
13687       continue;
13688 
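          // PSHUFB zeroes a result byte whenever bit 7 of its control byte is
          // set, so 0x80 marks lanes that must not take a byte from this input.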
13689     const int ZeroMask = 0x80;
13690     int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13691     int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13692     if (Zeroable[i / Scale])
13693       V1Idx = V2Idx = ZeroMask;
13694 
13695     V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13696     V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13697     V1InUse |= (ZeroMask != V1Idx);
13698     V2InUse |= (ZeroMask != V2Idx);
13699   }
13700 
13701   MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13702   if (V1InUse)
13703     V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13704                      DAG.getBuildVector(ShufVT, DL, V1Mask));
13705   if (V2InUse)
13706     V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13707                      DAG.getBuildVector(ShufVT, DL, V2Mask));
13708 
13709   // If we need shuffled inputs from both, blend the two.
13710   SDValue V;
13711   if (V1InUse && V2InUse)
13712     V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13713   else
13714     V = V1InUse ? V1 : V2;
13715 
13716   // Cast the result back to the correct type.
13717   return DAG.getBitcast(VT, V);
13718 }
13719 
13720 /// Generic lowering of 8-lane i16 shuffles.
13721 ///
13722 /// This handles both single-input shuffles and combined shuffle/blends with
13723 /// two inputs. The single input shuffles are immediately delegated to
13724 /// a dedicated lowering routine.
13725 ///
13726 /// The blends are lowered in one of three fundamental ways. If there are few
13727 /// enough inputs, we delegate to a basic UNPCK-based strategy. If the shuffle
13728 /// of the inputs is significantly cheaper when lowered as an interleaving of
13729 /// the two inputs, we interleave them. Otherwise, we blend the low and high
13730 /// halves of the inputs separately (making them have relatively few inputs)
13731 /// and then concatenate them.
13732 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13733                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13734                                  const X86Subtarget &Subtarget,
13735                                  SelectionDAG &DAG) {
13736   assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13737   assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13738   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13739 
13740   // Whenever we can lower this as a zext, that instruction is strictly faster
13741   // than any alternative.
13742   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13743                                                    Zeroable, Subtarget, DAG))
13744     return ZExt;
13745 
13746   // Try to lower using a truncation.
13747   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13748                                         Subtarget, DAG))
13749     return V;
13750 
13751   int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13752 
13753   if (NumV2Inputs == 0) {
13754     // Try to use shift instructions.
13755     if (SDValue Shift =
13756             lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, Zeroable,
13757                                 Subtarget, DAG, /*BitwiseOnly*/ false))
13758       return Shift;
13759 
13760     // Check for being able to broadcast a single element.
13761     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13762                                                     Mask, Subtarget, DAG))
13763       return Broadcast;
13764 
13765     // Try to use bit rotation instructions.
13766     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
13767                                                  Subtarget, DAG))
13768       return Rotate;
13769 
13770     // Use dedicated unpack instructions for masks that match their pattern.
13771     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13772       return V;
13773 
13774     // Use dedicated pack instructions for masks that match their pattern.
13775     if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13776                                          Subtarget))
13777       return V;
13778 
13779     // Try to use byte rotation instructions.
13780     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13781                                                   Subtarget, DAG))
13782       return Rotate;
13783 
13784     // Make a copy of the mask so it can be modified.
13785     SmallVector<int, 8> MutableMask(Mask);
13786     return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13787                                                Subtarget, DAG);
13788   }
13789 
13790   assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13791          "All single-input shuffles should be canonicalized to be V1-input "
13792          "shuffles.");
13793 
13794   // Try to use shift instructions.
13795   if (SDValue Shift =
13796           lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget,
13797                               DAG, /*BitwiseOnly*/ false))
13798     return Shift;
13799 
13800   // See if we can use SSE4A Extraction / Insertion.
13801   if (Subtarget.hasSSE4A())
13802     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
13803                                           Zeroable, DAG))
13804       return V;
13805 
13806   // There are special ways we can lower some single-element blends.
13807   if (NumV2Inputs == 1)
13808     if (SDValue V = lowerShuffleAsElementInsertion(
13809             DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13810       return V;
13811 
13812   // We have different paths for blend lowering, but they all must use the
13813   // *exact* same predicate.
13814   bool IsBlendSupported = Subtarget.hasSSE41();
13815   if (IsBlendSupported)
13816     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
13817                                             Zeroable, Subtarget, DAG))
13818       return Blend;
13819 
13820   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
13821                                              Zeroable, Subtarget, DAG))
13822     return Masked;
13823 
13824   // Use dedicated unpack instructions for masks that match their pattern.
13825   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13826     return V;
13827 
13828   // Use dedicated pack instructions for masks that match their pattern.
13829   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13830                                        Subtarget))
13831     return V;
13832 
13833   // Try to lower using a truncation.
13834   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13835                                        Subtarget, DAG))
13836     return V;
13837 
13838   // Try to use byte rotation instructions.
13839   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
13840                                                 Subtarget, DAG))
13841     return Rotate;
13842 
13843   if (SDValue BitBlend =
13844           lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
13845     return BitBlend;
13846 
13847   // Try to use byte shift instructions to mask.
13848   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
13849                                               Zeroable, Subtarget, DAG))
13850     return V;
13851 
13852   // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
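        // A compaction here is a mask that takes every 2nd (or 4th) word
        // starting at word 0, which the PACK sequences below can produce.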
13853   int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
13854   if ((NumEvenDrops == 1 || (NumEvenDrops == 2 && Subtarget.hasSSE41())) &&
13855       !Subtarget.hasVLX()) {
13856     // Check if this is part of a 256-bit vector truncation.
13857     unsigned PackOpc = 0;
13858     if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
13859         peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
13860         peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
13861       SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
13862       V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
13863                          getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
13864                          DAG.getTargetConstant(0xEE, DL, MVT::i8));
13865       V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
13866       V1 = extract128BitVector(V1V2, 0, DAG, DL);
13867       V2 = extract128BitVector(V1V2, 4, DAG, DL);
13868       PackOpc = X86ISD::PACKUS;
13869     } else if (Subtarget.hasSSE41()) {
13870       SmallVector<SDValue, 4> DWordClearOps(4,
13871                                             DAG.getConstant(0, DL, MVT::i32));
13872       for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
13873         DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
13874       SDValue DWordClearMask =
13875           DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
13876       V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
13877                        DWordClearMask);
13878       V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
13879                        DWordClearMask);
13880       PackOpc = X86ISD::PACKUS;
13881     } else if (!Subtarget.hasSSSE3()) {
13882       SDValue ShAmt = DAG.getTargetConstant(16, DL, MVT::i8);
13883       V1 = DAG.getBitcast(MVT::v4i32, V1);
13884       V2 = DAG.getBitcast(MVT::v4i32, V2);
13885       V1 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V1, ShAmt);
13886       V2 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V2, ShAmt);
13887       V1 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V1, ShAmt);
13888       V2 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V2, ShAmt);
13889       PackOpc = X86ISD::PACKSS;
13890     }
13891     if (PackOpc) {
13892       // Now pack things back together.
13893       SDValue Result = DAG.getNode(PackOpc, DL, MVT::v8i16, V1, V2);
13894       if (NumEvenDrops == 2) {
13895         Result = DAG.getBitcast(MVT::v4i32, Result);
13896         Result = DAG.getNode(PackOpc, DL, MVT::v8i16, Result, Result);
13897       }
13898       return Result;
13899     }
13900   }
13901 
13902   // When compacting odd (upper) elements, use PACKSS pre-SSE41.
13903   int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
13904   if (NumOddDrops == 1) {
13905     bool HasSSE41 = Subtarget.hasSSE41();
13906     V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13907                      DAG.getBitcast(MVT::v4i32, V1),
13908                      DAG.getTargetConstant(16, DL, MVT::i8));
13909     V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13910                      DAG.getBitcast(MVT::v4i32, V2),
13911                      DAG.getTargetConstant(16, DL, MVT::i8));
13912     return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
13913                        MVT::v8i16, V1, V2);
13914   }
13915 
13916   // Try to lower by permuting the inputs into an unpack instruction.
13917   if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
13918                                                       Mask, Subtarget, DAG))
13919     return Unpack;
13920 
13921   // If we can't directly blend but can use PSHUFB, that will be better as it
13922   // can both shuffle and set up the inefficient blend.
13923   if (!IsBlendSupported && Subtarget.hasSSSE3()) {
13924     bool V1InUse, V2InUse;
13925     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
13926                                         Zeroable, DAG, V1InUse, V2InUse);
13927   }
13928 
13929   // We can always bit-blend if we have to, so the fallback strategy is to
13930   // decompose into single-input permutes and blends/unpacks.
13931   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
13932                                               Mask, Subtarget, DAG);
13933 }
13934 
13935 /// Lower 8-lane 16-bit floating point shuffles.
13936 static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13937                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13938                                  const X86Subtarget &Subtarget,
13939                                  SelectionDAG &DAG) {
13940   assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13941   assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13942   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13943   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
13944 
13945   if (Subtarget.hasFP16()) {
13946     if (NumV2Elements == 0) {
13947       // Check for being able to broadcast a single element.
13948       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
13949                                                       Mask, Subtarget, DAG))
13950         return Broadcast;
13951     }
13952     if (NumV2Elements == 1 && Mask[0] >= 8)
13953       if (SDValue V = lowerShuffleAsElementInsertion(
13954               DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13955         return V;
13956   }
13957 
13958   V1 = DAG.getBitcast(MVT::v8i16, V1);
13959   V2 = DAG.getBitcast(MVT::v8i16, V2);
13960   return DAG.getBitcast(MVT::v8f16,
13961                         DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
13962 }
13963 
13964 // Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
13965 // sub-512-bit shuffles are padded to 512 bits for the shuffle and then
13966 // the active subvector is extracted.
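      // E.g. on AVX512 without VLX, a v16i8 shuffle is widened to v64i8,
      // shuffled with VPERMB/VPERMT2B, and the low 128 bits of the result are
      // extracted.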
13967 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
13968                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
13969                                      const X86Subtarget &Subtarget,
13970                                      SelectionDAG &DAG) {
13971   MVT MaskVT = VT.changeTypeToInteger();
13972   SDValue MaskNode;
13973   MVT ShuffleVT = VT;
13974   if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
13975     V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
13976     V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
13977     ShuffleVT = V1.getSimpleValueType();
13978 
13979     // Adjust mask to correct indices for the second input.
13980     int NumElts = VT.getVectorNumElements();
13981     unsigned Scale = 512 / VT.getSizeInBits();
13982     SmallVector<int, 32> AdjustedMask(Mask);
13983     for (int &M : AdjustedMask)
13984       if (NumElts <= M)
13985         M += (Scale - 1) * NumElts;
13986     MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
13987     MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
13988   } else {
13989     MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
13990   }
13991 
13992   SDValue Result;
13993   if (V2.isUndef())
13994     Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
13995   else
13996     Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
13997 
13998   if (VT != ShuffleVT)
13999     Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());
14000 
14001   return Result;
14002 }
14003 
14004 /// Generic lowering of v16i8 shuffles.
14005 ///
14006 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14007 /// detect any complexity-reducing interleaving. If that doesn't help, it uses
14008 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14009 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14010 /// back together.
14011 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14012                                  const APInt &Zeroable, SDValue V1, SDValue V2,
14013                                  const X86Subtarget &Subtarget,
14014                                  SelectionDAG &DAG) {
14015   assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14016   assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14017   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14018 
14019   // Try to use shift instructions.
14020   if (SDValue Shift =
14021           lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget,
14022                               DAG, /*BitwiseOnly*/ false))
14023     return Shift;
14024 
14025   // Try to use byte rotation instructions.
14026   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14027                                                 Subtarget, DAG))
14028     return Rotate;
14029 
14030   // Use dedicated pack instructions for masks that match their pattern.
14031   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14032                                        Subtarget))
14033     return V;
14034 
14035   // Try to use a zext lowering.
14036   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14037                                                    Zeroable, Subtarget, DAG))
14038     return ZExt;
14039 
14040   // Try to lower using a truncation.
14041   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14042                                         Subtarget, DAG))
14043     return V;
14044 
14045   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14046                                        Subtarget, DAG))
14047     return V;
14048 
14049   // See if we can use SSE4A Extraction / Insertion.
14050   if (Subtarget.hasSSE4A())
14051     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14052                                           Zeroable, DAG))
14053       return V;
14054 
14055   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14056 
14057   // For single-input shuffles, there are some nicer lowering tricks we can use.
14058   if (NumV2Elements == 0) {
14059     // Check for being able to broadcast a single element.
14060     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14061                                                     Mask, Subtarget, DAG))
14062       return Broadcast;
14063 
14064     // Try to use bit rotation instructions.
14065     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
14066                                                  Subtarget, DAG))
14067       return Rotate;
14068 
14069     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14070       return V;
14071 
14072     // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14073     // Notably, this handles splat and partial-splat shuffles more efficiently.
14074     // However, it only makes sense if the pre-duplication shuffle simplifies
14075     // things significantly. Currently, this means we need to be able to
14076     // express the pre-duplication shuffle as an i16 shuffle.
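          // For example, a full splat of byte 5 qualifies: every even/odd pair
          // of mask elements matches, so the byte can be gathered with an i16
          // shuffle, doubled up with an unpack, and placed by a final i16
          // shuffle.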
14077     //
14078     // FIXME: We should check for other patterns which can be widened into an
14079     // i16 shuffle as well.
14080     auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14081       for (int i = 0; i < 16; i += 2)
14082         if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14083           return false;
14084 
14085       return true;
14086     };
14087     auto tryToWidenViaDuplication = [&]() -> SDValue {
14088       if (!canWidenViaDuplication(Mask))
14089         return SDValue();
14090       SmallVector<int, 4> LoInputs;
14091       copy_if(Mask, std::back_inserter(LoInputs),
14092               [](int M) { return M >= 0 && M < 8; });
14093       array_pod_sort(LoInputs.begin(), LoInputs.end());
14094       LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14095                      LoInputs.end());
14096       SmallVector<int, 4> HiInputs;
14097       copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14098       array_pod_sort(HiInputs.begin(), HiInputs.end());
14099       HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14100                      HiInputs.end());
14101 
14102       bool TargetLo = LoInputs.size() >= HiInputs.size();
14103       ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14104       ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14105 
14106       int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14107       SmallDenseMap<int, int, 8> LaneMap;
14108       for (int I : InPlaceInputs) {
14109         PreDupI16Shuffle[I/2] = I/2;
14110         LaneMap[I] = I;
14111       }
14112       int j = TargetLo ? 0 : 4, je = j + 4;
14113       for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14114         // Check if j is already a shuffle of this input. This happens when
14115         // there are two adjacent bytes after we move the low one.
14116         if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14117           // If we haven't yet mapped the input, search for a slot into which
14118           // we can map it.
14119           while (j < je && PreDupI16Shuffle[j] >= 0)
14120             ++j;
14121 
14122           if (j == je)
14123             // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14124             return SDValue();
14125 
14126           // Map this input with the i16 shuffle.
14127           PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14128         }
14129 
14130         // Update the lane map based on the mapping we ended up with.
14131         LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14132       }
14133       V1 = DAG.getBitcast(
14134           MVT::v16i8,
14135           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14136                                DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14137 
14138       // Unpack the bytes to form the i16s that will be shuffled into place.
14139       bool EvenInUse = false, OddInUse = false;
14140       for (int i = 0; i < 16; i += 2) {
14141         EvenInUse |= (Mask[i + 0] >= 0);
14142         OddInUse |= (Mask[i + 1] >= 0);
14143         if (EvenInUse && OddInUse)
14144           break;
14145       }
14146       V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14147                        MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14148                        OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14149 
14150       int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14151       for (int i = 0; i < 16; ++i)
14152         if (Mask[i] >= 0) {
14153           int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14154           assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14155           if (PostDupI16Shuffle[i / 2] < 0)
14156             PostDupI16Shuffle[i / 2] = MappedMask;
14157           else
14158             assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14159                    "Conflicting entries in the original shuffle!");
14160         }
14161       return DAG.getBitcast(
14162           MVT::v16i8,
14163           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14164                                DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14165     };
14166     if (SDValue V = tryToWidenViaDuplication())
14167       return V;
14168   }
14169 
14170   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14171                                              Zeroable, Subtarget, DAG))
14172     return Masked;
14173 
14174   // Use dedicated unpack instructions for masks that match their pattern.
14175   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14176     return V;
14177 
14178   // Try to use byte shift instructions to mask.
14179   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14180                                               Zeroable, Subtarget, DAG))
14181     return V;
14182 
14183   // Check for compaction patterns.
14184   bool IsSingleInput = V2.isUndef();
14185   int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
14186 
14187   // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14188   // with PSHUFB. It is important to do this before we attempt to generate any
14189   // blends but after all of the single-input lowerings. If the single input
14190   // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14191   // want to preserve that and we can DAG combine any longer sequences into
14192   // a PSHUFB in the end. But once we start blending from multiple inputs,
14193   // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14194   // and there are *very* few patterns that would actually be faster than the
14195   // PSHUFB approach because of its ability to zero lanes.
14196   //
14197   // If the mask is a binary compaction, we can more efficiently perform this
14198   // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
14199   //
14200   // FIXME: The only exceptions to the above are blends which are exact
14201   // interleavings with direct instructions supporting them. We currently don't
14202   // handle those well here.
14203   if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
14204     bool V1InUse = false;
14205     bool V2InUse = false;
14206 
14207     SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14208         DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14209 
14210     // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14211     // do so. This avoids using them to handle blends-with-zero which is
14212     // important as a single pshufb is significantly faster for that.
14213     if (V1InUse && V2InUse) {
14214       if (Subtarget.hasSSE41())
14215         if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14216                                                 Zeroable, Subtarget, DAG))
14217           return Blend;
14218 
14219       // We can use an unpack to do the blending rather than an or in some
14220       // cases. Even though the or may be (very slightly) more efficient, we
14221       // prefer this lowering because there are common cases where part of
14222       // the complexity of the shuffles goes away when we do the final blend as
14223       // an unpack.
14224       // FIXME: It might be worth trying to detect if the unpack-feeding
14225       // shuffles will both be pshufb, in which case we shouldn't bother with
14226       // this.
14227       if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14228               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14229         return Unpack;
14230 
14231       // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
14232       if (Subtarget.hasVBMI())
14233         return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
14234                                      DAG);
14235 
14236       // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
14237       if (Subtarget.hasXOP()) {
14238         SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
14239         return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
14240       }
14241 
14242       // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14243       // PALIGNR will be cheaper than the second PSHUFB+OR.
14244       if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14245               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14246         return V;
14247     }
14248 
14249     return PSHUFB;
14250   }
14251 
14252   // There are special ways we can lower some single-element blends.
14253   if (NumV2Elements == 1)
14254     if (SDValue V = lowerShuffleAsElementInsertion(
14255             DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14256       return V;
14257 
14258   if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14259     return Blend;
14260 
14261   // Check whether a compaction lowering can be done. This handles shuffles
14262   // which take every Nth element for some even N. See the helper function for
14263   // details.
14264   //
14265   // We special case these as they can be particularly efficiently handled with
14266   // the PACKUSWB instruction on x86 and they show up in common patterns of
14267   // rearranging bytes to truncate wide elements.
14268   if (NumEvenDrops) {
14269     // NumEvenDrops is the power of two stride of the elements. Another way of
14270     // thinking about it is that we need to drop the even elements this many
14271     // times to get the original input.
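          // E.g. NumEvenDrops == 1 corresponds to a mask that keeps bytes
          // 0, 2, 4, ... and NumEvenDrops == 2 to one that keeps bytes
          // 0, 4, 8, ...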
14272 
14273     // First we need to zero all the dropped bytes.
14274     assert(NumEvenDrops <= 3 &&
14275            "No support for dropping even elements more than 3 times.");
14276     SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
14277     for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
14278       WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
14279     SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
14280     V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
14281                      WordClearMask);
14282     if (!IsSingleInput)
14283       V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
14284                        WordClearMask);
14285 
14286     // Now pack things back together.
14287     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14288                                  IsSingleInput ? V1 : V2);
14289     for (int i = 1; i < NumEvenDrops; ++i) {
14290       Result = DAG.getBitcast(MVT::v8i16, Result);
14291       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14292     }
14293     return Result;
14294   }
14295 
14296   int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
14297   if (NumOddDrops == 1) {
14298     V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14299                      DAG.getBitcast(MVT::v8i16, V1),
14300                      DAG.getTargetConstant(8, DL, MVT::i8));
14301     if (!IsSingleInput)
14302       V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14303                        DAG.getBitcast(MVT::v8i16, V2),
14304                        DAG.getTargetConstant(8, DL, MVT::i8));
14305     return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14306                        IsSingleInput ? V1 : V2);
14307   }
14308 
14309   // Handle multi-input cases by blending/unpacking single-input shuffles.
14310   if (NumV2Elements > 0)
14311     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
14312                                                 Subtarget, DAG);
14313 
14314   // The fallback path for single-input shuffles widens this into two v8i16
14315   // vectors with unpacks, shuffles those, and then pulls them back together
14316   // with a pack.
14317   SDValue V = V1;
14318 
14319   std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14320   std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14321   for (int i = 0; i < 16; ++i)
14322     if (Mask[i] >= 0)
14323       (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14324 
14325   SDValue VLoHalf, VHiHalf;
14326   // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14327   // them out and avoid using UNPCK{L,H} to extract the elements of V as
14328   // i16s.
14329   if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14330       none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14331     // Use a mask to drop the high bytes.
14332     VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14333     VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14334                           DAG.getConstant(0x00FF, DL, MVT::v8i16));
14335 
14336     // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14337     VHiHalf = DAG.getUNDEF(MVT::v8i16);
14338 
14339     // Squash the masks to point directly into VLoHalf.
14340     for (int &M : LoBlendMask)
14341       if (M >= 0)
14342         M /= 2;
14343     for (int &M : HiBlendMask)
14344       if (M >= 0)
14345         M /= 2;
14346   } else {
14347     // Otherwise just unpack the low half of V into VLoHalf and the high half into
14348     // VHiHalf so that we can blend them as i16s.
14349     SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14350 
14351     VLoHalf = DAG.getBitcast(
14352         MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14353     VHiHalf = DAG.getBitcast(
14354         MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14355   }
14356 
14357   SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14358   SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14359 
14360   return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14361 }
14362 
14363 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14364 ///
14365 /// This routine breaks down the specific type of 128-bit shuffle and
14366 /// dispatches to the lowering routines accordingly.
14367 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14368                                   MVT VT, SDValue V1, SDValue V2,
14369                                   const APInt &Zeroable,
14370                                   const X86Subtarget &Subtarget,
14371                                   SelectionDAG &DAG) {
14372   switch (VT.SimpleTy) {
14373   case MVT::v2i64:
14374     return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14375   case MVT::v2f64:
14376     return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14377   case MVT::v4i32:
14378     return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14379   case MVT::v4f32:
14380     return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14381   case MVT::v8i16:
14382     return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14383   case MVT::v8f16:
14384     return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14385   case MVT::v16i8:
14386     return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14387 
14388   default:
14389     llvm_unreachable("Unimplemented!");
14390   }
14391 }
14392 
14393 /// Generic routine to split vector shuffle into half-sized shuffles.
14394 ///
14395 /// This routine just extracts two subvectors, shuffles them independently, and
14396 /// then concatenates them back together. This should work effectively with all
14397 /// AVX vector shuffle types.
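      /// For example, a v8f32 shuffle is lowered via v4f32 shuffles of the
      /// split operands, with the two halves concatenated back into a v8f32.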
14398 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14399                                     SDValue V2, ArrayRef<int> Mask,
14400                                     SelectionDAG &DAG, bool SimpleOnly) {
14401   assert(VT.getSizeInBits() >= 256 &&
14402          "Only for 256-bit or wider vector shuffles!");
14403   assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14404   assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14405 
14406   ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14407   ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14408 
14409   int NumElements = VT.getVectorNumElements();
14410   int SplitNumElements = NumElements / 2;
14411   MVT ScalarVT = VT.getVectorElementType();
14412   MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
14413 
14414   // Use splitVector/extractSubVector so that split build-vectors just build two
14415   // narrower build vectors. This helps shuffling with splats and zeros.
14416   auto SplitVector = [&](SDValue V) {
14417     SDValue LoV, HiV;
14418     std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
14419     return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14420                           DAG.getBitcast(SplitVT, HiV));
14421   };
14422 
14423   SDValue LoV1, HiV1, LoV2, HiV2;
14424   std::tie(LoV1, HiV1) = SplitVector(V1);
14425   std::tie(LoV2, HiV2) = SplitVector(V2);
14426 
14427   // Now create two 4-way blends of these half-width vectors.
14428   auto GetHalfBlendPiecesReq = [&](const ArrayRef<int> &HalfMask, bool &UseLoV1,
14429                                    bool &UseHiV1, bool &UseLoV2,
14430                                    bool &UseHiV2) {
14431     UseLoV1 = UseHiV1 = UseLoV2 = UseHiV2 = false;
14432     for (int i = 0; i < SplitNumElements; ++i) {
14433       int M = HalfMask[i];
14434       if (M >= NumElements) {
14435         if (M >= NumElements + SplitNumElements)
14436           UseHiV2 = true;
14437         else
14438           UseLoV2 = true;
14439       } else if (M >= 0) {
14440         if (M >= SplitNumElements)
14441           UseHiV1 = true;
14442         else
14443           UseLoV1 = true;
14444       }
14445     }
14446   };
14447 
14448   auto CheckHalfBlendUsable = [&](const ArrayRef<int> &HalfMask) -> bool {
14449     if (!SimpleOnly)
14450       return true;
14451 
14452     bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14453     GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14454 
14455     return !(UseHiV1 || UseHiV2);
14456   };
14457 
14458   auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14459     SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14460     SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14461     SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14462     for (int i = 0; i < SplitNumElements; ++i) {
14463       int M = HalfMask[i];
14464       if (M >= NumElements) {
14465         V2BlendMask[i] = M - NumElements;
14466         BlendMask[i] = SplitNumElements + i;
14467       } else if (M >= 0) {
14468         V1BlendMask[i] = M;
14469         BlendMask[i] = i;
14470       }
14471     }
14472 
14473     bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14474     GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14475 
14476     // Because the lowering happens after all combining takes place, we need to
14477     // manually combine these blend masks as much as possible so that we create
14478     // a minimal number of high-level vector shuffle nodes.
14479     assert((!SimpleOnly || (!UseHiV1 && !UseHiV2)) && "Shuffle isn't simple");
14480 
14481     // First try just blending the halves of V1 or V2.
14482     if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14483       return DAG.getUNDEF(SplitVT);
14484     if (!UseLoV2 && !UseHiV2)
14485       return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14486     if (!UseLoV1 && !UseHiV1)
14487       return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14488 
14489     SDValue V1Blend, V2Blend;
14490     if (UseLoV1 && UseHiV1) {
14491       V1Blend = DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14492     } else {
14493       // We only use half of V1 so map the usage down into the final blend mask.
14494       V1Blend = UseLoV1 ? LoV1 : HiV1;
14495       for (int i = 0; i < SplitNumElements; ++i)
14496         if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14497           BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14498     }
14499     if (UseLoV2 && UseHiV2) {
14500       V2Blend = DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14501     } else {
14502       // We only use half of V2 so map the usage down into the final blend mask.
14503       V2Blend = UseLoV2 ? LoV2 : HiV2;
14504       for (int i = 0; i < SplitNumElements; ++i)
14505         if (BlendMask[i] >= SplitNumElements)
14506           BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14507     }
14508     return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14509   };
14510 
14511   if (!CheckHalfBlendUsable(LoMask) || !CheckHalfBlendUsable(HiMask))
14512     return SDValue();
14513 
14514   SDValue Lo = HalfBlend(LoMask);
14515   SDValue Hi = HalfBlend(HiMask);
14516   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14517 }
14518 
14519 /// Either split a vector in halves or decompose the shuffle into per-input
14520 /// shuffles followed by a blend/unpack.
14521 ///
14522 /// This is provided as a good fallback for many lowerings of non-single-input
14523 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14524 /// between splitting the shuffle into 128-bit components and stitching those
14525 /// back together vs. extracting the single-input shuffles and blending those
14526 /// results.
14527 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14528                                           SDValue V2, ArrayRef<int> Mask,
14529                                           const X86Subtarget &Subtarget,
14530                                           SelectionDAG &DAG) {
14531   assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14532          "shuffles as it could then recurse on itself.");
14533   int Size = Mask.size();
14534 
14535   // If this can be modeled as a broadcast of two elements followed by a blend,
14536   // prefer that lowering. This is especially important because broadcasts can
14537   // often fold with memory operands.
14538   auto DoBothBroadcast = [&] {
14539     int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14540     for (int M : Mask)
14541       if (M >= Size) {
14542         if (V2BroadcastIdx < 0)
14543           V2BroadcastIdx = M - Size;
14544         else if (M - Size != V2BroadcastIdx)
14545           return false;
14546       } else if (M >= 0) {
14547         if (V1BroadcastIdx < 0)
14548           V1BroadcastIdx = M;
14549         else if (M != V1BroadcastIdx)
14550           return false;
14551       }
14552     return true;
14553   };
14554   if (DoBothBroadcast())
14555     return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14556                                                 DAG);
14557 
14558   // If the inputs all stem from a single 128-bit lane of each input, then we
14559   // split them rather than blending because the split will decompose to
14560   // unusually few instructions.
14561   int LaneCount = VT.getSizeInBits() / 128;
14562   int LaneSize = Size / LaneCount;
14563   SmallBitVector LaneInputs[2];
14564   LaneInputs[0].resize(LaneCount, false);
14565   LaneInputs[1].resize(LaneCount, false);
14566   for (int i = 0; i < Size; ++i)
14567     if (Mask[i] >= 0)
14568       LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14569   if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14570     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14571                                 /*SimpleOnly*/ false);
14572 
14573   // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
14574   // requires that the decomposed single-input shuffles don't end up here.
14575   return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14576                                               DAG);
14577 }
14578 
14579 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14580 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
14581 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14582                                                  SDValue V1, SDValue V2,
14583                                                  ArrayRef<int> Mask,
14584                                                  SelectionDAG &DAG) {
14585   assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14586 
14587   int LHSMask[4] = {-1, -1, -1, -1};
14588   int RHSMask[4] = {-1, -1, -1, -1};
14589   unsigned SHUFPMask = 0;
14590 
14591   // As SHUFPD uses a single LHS/RHS element per lane, we can always
14592   // perform the shuffle once the lanes have been shuffled in place.
14593   for (int i = 0; i != 4; ++i) {
14594     int M = Mask[i];
14595     if (M < 0)
14596       continue;
14597     int LaneBase = i & ~1;
14598     auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14599     LaneMask[LaneBase + (M & 1)] = M;
14600     SHUFPMask |= (M & 1) << i;
14601   }
14602 
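  // Illustrative example: Mask <0,5,2,7> gives LHSMask <0,u,2,u>, RHSMask
  // <u,5,u,7> and SHUFPMask 0b1010, so the SHUFPD below selects LHS[0], RHS[1],
  // LHS[2], RHS[3], i.e. elements 0, 5, 2 and 7.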
14603   SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14604   SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14605   return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14606                      DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14607 }
14608 
14609 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14610 /// a lane permutation followed by a per-lane permutation.
14611 ///
14612 /// This is mainly for cases where we can have non-repeating permutes
14613 /// in each lane.
14614 ///
14615 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14616 /// we should investigate merging them.
14617 static SDValue lowerShuffleAsLanePermuteAndPermute(
14618     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14619     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14620   int NumElts = VT.getVectorNumElements();
14621   int NumLanes = VT.getSizeInBits() / 128;
14622   int NumEltsPerLane = NumElts / NumLanes;
14623   bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
14624 
14625   /// Attempts to find a sublane permute with the given size
14626   /// that gets all elements into their target lanes.
14627   ///
14628   /// If successful, fills CrossLaneMask and InLaneMask and returns the lowered
14629   /// shuffle; on failure returns a null SDValue and may overwrite InLaneMask.
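  /// For example (v8f32, NumSublanes == 2), the reversal mask <7,6,5,4,3,2,1,0>
  /// splits into CrossLaneMask <4,5,6,7,0,1,2,3> (swap the lanes) and InLaneMask
  /// <3,2,1,0,7,6,5,4> (reverse within each lane).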
14630   auto getSublanePermute = [&](int NumSublanes) -> SDValue {
14631     int NumSublanesPerLane = NumSublanes / NumLanes;
14632     int NumEltsPerSublane = NumElts / NumSublanes;
14633 
14634     SmallVector<int, 16> CrossLaneMask;
14635     SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
14636     // Like CrossLaneMask, but each entry represents a whole sublane.
14637     SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
14638 
14639     for (int i = 0; i != NumElts; ++i) {
14640       int M = Mask[i];
14641       if (M < 0)
14642         continue;
14643 
14644       int SrcSublane = M / NumEltsPerSublane;
14645       int DstLane = i / NumEltsPerLane;
14646 
14647       // We only need to get the elements into the right lane, not sublane.
14648       // So search all sublanes that make up the destination lane.
14649       bool Found = false;
14650       int DstSubStart = DstLane * NumSublanesPerLane;
14651       int DstSubEnd = DstSubStart + NumSublanesPerLane;
14652       for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
14653         if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
14654           continue;
14655 
14656         Found = true;
14657         CrossLaneMaskLarge[DstSublane] = SrcSublane;
14658         int DstSublaneOffset = DstSublane * NumEltsPerSublane;
14659         InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
14660         break;
14661       }
14662       if (!Found)
14663         return SDValue();
14664     }
14665 
14666     // Fill CrossLaneMask using CrossLaneMaskLarge.
14667     narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
14668 
14669     if (!CanUseSublanes) {
14670       // If we're only shuffling a single lowest lane and the rest are identity
14671       // then don't bother.
14672       // TODO - isShuffleMaskInputInPlace could be extended to something like
14673       // this.
14674       int NumIdentityLanes = 0;
14675       bool OnlyShuffleLowestLane = true;
14676       for (int i = 0; i != NumLanes; ++i) {
14677         int LaneOffset = i * NumEltsPerLane;
14678         if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
14679                                        i * NumEltsPerLane))
14680           NumIdentityLanes++;
14681         else if (CrossLaneMask[LaneOffset] != 0)
14682           OnlyShuffleLowestLane = false;
14683       }
14684       if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14685         return SDValue();
14686     }
14687 
14688     // Avoid returning the same shuffle operation. For example,
14689     // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
14690     //                             undef:v16i16
14691     if (CrossLaneMask == Mask || InLaneMask == Mask)
14692       return SDValue();
14693 
14694     SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
14695     return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
14696                                 InLaneMask);
14697   };
14698 
14699   // First attempt a solution with full lanes.
14700   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
14701     return V;
14702 
14703   // The rest of the solutions use sublanes.
14704   if (!CanUseSublanes)
14705     return SDValue();
14706 
14707   // Then attempt a solution with 64-bit sublanes (vpermq).
14708   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
14709     return V;
14710 
14711   // If that doesn't work and we have fast variable cross-lane shuffle,
14712   // attempt 32-bit sublanes (vpermd).
14713   if (!Subtarget.hasFastVariableCrossLaneShuffle())
14714     return SDValue();
14715 
14716   return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
14717 }
14718 
14719 /// Helper to compute the in-lane shuffle mask for a complete shuffle mask.
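/// For example (v4f64, LaneSize == 2), the mask <2,1,0,3> becomes the in-lane
/// mask <4,1,6,3>: the lane-crossing elements 0 and 2 are redirected to the
/// second shuffle operand, which the caller below supplies as a lane-flipped
/// copy of the source vector.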
14720 static void computeInLaneShuffleMask(const ArrayRef<int> &Mask, int LaneSize,
14721                                      SmallVector<int> &InLaneMask) {
14722   int Size = Mask.size();
14723   InLaneMask.assign(Mask.begin(), Mask.end());
14724   for (int i = 0; i < Size; ++i) {
14725     int &M = InLaneMask[i];
14726     if (M < 0)
14727       continue;
14728     if (((M % Size) / LaneSize) != (i / LaneSize))
14729       M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
14730   }
14731 }
14732 
14733 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14734 /// source with a lane permutation.
14735 ///
14736 /// This lowering strategy results in four instructions in the worst case for a
14737 /// single-input cross-lane shuffle, which is fewer than any other fully general
14738 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14739 /// shuffle pattern should be handled prior to trying this lowering.
14740 static SDValue lowerShuffleAsLanePermuteAndShuffle(
14741     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14742     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14743   // FIXME: This should probably be generalized for 512-bit vectors as well.
14744   assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14745   int Size = Mask.size();
14746   int LaneSize = Size / 2;
14747 
14748   // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14749   // Only do this if the elements aren't all from the lower lane,
14750   // otherwise we're (probably) better off doing a split.
14751   if (VT == MVT::v4f64 &&
14752       !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
14753     return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
14754 
14755   // If there are only inputs from one 128-bit lane, splitting will in fact be
14756   // less expensive. The flags track whether the given lane contains an element
14757   // that crosses to another lane.
14758   bool AllLanes;
14759   if (!Subtarget.hasAVX2()) {
14760     bool LaneCrossing[2] = {false, false};
14761     for (int i = 0; i < Size; ++i)
14762       if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
14763         LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14764     AllLanes = LaneCrossing[0] && LaneCrossing[1];
14765   } else {
14766     bool LaneUsed[2] = {false, false};
14767     for (int i = 0; i < Size; ++i)
14768       if (Mask[i] >= 0)
14769         LaneUsed[(Mask[i] % Size) / LaneSize] = true;
14770     AllLanes = LaneUsed[0] && LaneUsed[1];
14771   }
14772 
14773   // TODO - we could support shuffling V2 in the Flipped input.
14774   assert(V2.isUndef() &&
14775          "This last part of this routine only works on single input shuffles");
14776 
14777   SmallVector<int> InLaneMask;
14778   computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
14779 
14780   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
14781          "In-lane shuffle mask expected");
14782 
14783   // If we're not using both lanes in each lane and the inlane mask is not
14784   // repeating, then we're better off splitting.
14785   if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
14786     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14787                                 /*SimpleOnly*/ false);
14788 
14789   // Flip the lanes, and shuffle the results which should now be in-lane.
14790   MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14791   SDValue Flipped = DAG.getBitcast(PVT, V1);
14792   Flipped =
14793       DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
14794   Flipped = DAG.getBitcast(VT, Flipped);
14795   return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
14796 }
14797 
14798 /// Handle lowering 2-lane 128-bit shuffles.
14799 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14800                                   SDValue V2, ArrayRef<int> Mask,
14801                                   const APInt &Zeroable,
14802                                   const X86Subtarget &Subtarget,
14803                                   SelectionDAG &DAG) {
14804   if (V2.isUndef()) {
14805     // Attempt to match VBROADCAST*128 subvector broadcast load.
14806     bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
14807     bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
14808     if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
14809         X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
14810       MVT MemVT = VT.getHalfNumVectorElementsVT();
14811       unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
14812       auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
14813       if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
14814                                              VT, MemVT, Ld, Ofs, DAG))
14815         return BcstLd;
14816     }
14817 
14818     // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14819     if (Subtarget.hasAVX2())
14820       return SDValue();
14821   }
14822 
14823   bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
14824 
14825   SmallVector<int, 4> WidenedMask;
14826   if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
14827     return SDValue();
14828 
14829   bool IsLowZero = (Zeroable & 0x3) == 0x3;
14830   bool IsHighZero = (Zeroable & 0xc) == 0xc;
14831 
14832   // Try to use an insert into a zero vector.
14833   if (WidenedMask[0] == 0 && IsHighZero) {
14834     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14835     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14836                               DAG.getIntPtrConstant(0, DL));
14837     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14838                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
14839                        DAG.getIntPtrConstant(0, DL));
14840   }
14841 
14842   // TODO: If minimizing size and one of the inputs is a zero vector and the
14843   // zero vector has only one use, we could use a VPERM2X128 to save the
14844   // instruction bytes needed to explicitly generate the zero vector.
14845 
14846   // Blends are faster and handle all the non-lane-crossing cases.
14847   if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14848                                           Subtarget, DAG))
14849     return Blend;
14850 
14851   // If either input operand is a zero vector, use VPERM2X128 because its mask
14852   // allows us to replace the zero input with an implicit zero.
14853   if (!IsLowZero && !IsHighZero) {
14854     // Check for patterns which can be matched with a single insert of a 128-bit
14855     // subvector.
14856     bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
14857     if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
14858 
14859       // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14860       // this will likely become vinsertf128 which can't fold a 256-bit memop.
14861       if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14862         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14863         SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14864                                      OnlyUsesV1 ? V1 : V2,
14865                                      DAG.getIntPtrConstant(0, DL));
14866         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14867                            DAG.getIntPtrConstant(2, DL));
14868       }
14869     }
14870 
14871     // Try to use SHUF128 if possible.
14872     if (Subtarget.hasVLX()) {
14873       if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14874         unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14875                             ((WidenedMask[1] % 2) << 1);
14876         return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14877                            DAG.getTargetConstant(PermMask, DL, MVT::i8));
14878       }
14879     }
14880   }
14881 
14882   // Otherwise form a 128-bit permutation. After accounting for undefs,
14883   // convert the 64-bit shuffle mask selection values into 128-bit
14884   // selection bits by dividing the indexes by 2 and shifting into positions
14885   // defined by a vperm2*128 instruction's immediate control byte.
14886 
14887   // The immediate permute control byte looks like this:
14888   //    [1:0] - select 128 bits from sources for low half of destination
14889   //    [2]   - ignore
14890   //    [3]   - zero low half of destination
14891   //    [5:4] - select 128 bits from sources for high half of destination
14892   //    [6]   - ignore
14893   //    [7]   - zero high half of destination
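  //
  // For example, a v4f64 mask <2,3,4,5> widens to <1,2> and yields the control
  // byte (1 << 0) | (2 << 4) == 0x21: upper half of V1, then lower half of V2.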
14894 
14895   assert((WidenedMask[0] >= 0 || IsLowZero) &&
14896          (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14897 
14898   unsigned PermMask = 0;
14899   PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
14900   PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14901 
14902   // Check the immediate mask and replace unused sources with undef.
14903   if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14904     V1 = DAG.getUNDEF(VT);
14905   if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14906     V2 = DAG.getUNDEF(VT);
14907 
14908   return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14909                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
14910 }
14911 
14912 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14913 /// shuffling each lane.
14914 ///
14915 /// This attempts to create a repeated lane shuffle where each lane uses one
14916 /// or two of the lanes of the inputs. The lanes of the input vectors are
14917 /// shuffled in one or two independent shuffles to get the lanes into the
14918 /// position needed by the final shuffle.
14919 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14920     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14921     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14922   assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14923 
14924   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14925     return SDValue();
14926 
14927   int NumElts = Mask.size();
14928   int NumLanes = VT.getSizeInBits() / 128;
14929   int NumLaneElts = 128 / VT.getScalarSizeInBits();
14930   SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
14931   SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14932 
14933   // First pass will try to fill in the RepeatMask from lanes that need two
14934   // sources.
14935   for (int Lane = 0; Lane != NumLanes; ++Lane) {
14936     int Srcs[2] = {-1, -1};
14937     SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
14938     for (int i = 0; i != NumLaneElts; ++i) {
14939       int M = Mask[(Lane * NumLaneElts) + i];
14940       if (M < 0)
14941         continue;
14942       // Determine which of the possible input lanes (NumLanes from each source)
14943       // this element comes from. Assign that as one of the sources for this
14944       // lane. We can assign up to 2 sources for this lane. If we run out of
14945       // sources we can't do anything.
14946       int LaneSrc = M / NumLaneElts;
14947       int Src;
14948       if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14949         Src = 0;
14950       else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14951         Src = 1;
14952       else
14953         return SDValue();
14954 
14955       Srcs[Src] = LaneSrc;
14956       InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
14957     }
14958 
14959     // If this lane has two sources, see if it fits with the repeat mask so far.
14960     if (Srcs[1] < 0)
14961       continue;
14962 
14963     LaneSrcs[Lane][0] = Srcs[0];
14964     LaneSrcs[Lane][1] = Srcs[1];
14965 
14966     auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14967       assert(M1.size() == M2.size() && "Unexpected mask size");
14968       for (int i = 0, e = M1.size(); i != e; ++i)
14969         if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14970           return false;
14971       return true;
14972     };
14973 
14974     auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14975       assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14976       for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14977         int M = Mask[i];
14978         if (M < 0)
14979           continue;
14980         assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14981                "Unexpected mask element");
14982         MergedMask[i] = M;
14983       }
14984     };
14985 
14986     if (MatchMasks(InLaneMask, RepeatMask)) {
14987       // Merge this lane mask into the final repeat mask.
14988       MergeMasks(InLaneMask, RepeatMask);
14989       continue;
14990     }
14991 
14992     // Didn't find a match. Swap the operands and try again.
14993     std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
14994     ShuffleVectorSDNode::commuteMask(InLaneMask);
14995 
14996     if (MatchMasks(InLaneMask, RepeatMask)) {
14997       // Merge this lane mask into the final repeat mask.
14998       MergeMasks(InLaneMask, RepeatMask);
14999       continue;
15000     }
15001 
15002     // Couldn't find a match with the operands in either order.
15003     return SDValue();
15004   }
15005 
15006   // Now handle any lanes with only one source.
15007   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15008     // If this lane has already been processed, skip it.
15009     if (LaneSrcs[Lane][0] >= 0)
15010       continue;
15011 
15012     for (int i = 0; i != NumLaneElts; ++i) {
15013       int M = Mask[(Lane * NumLaneElts) + i];
15014       if (M < 0)
15015         continue;
15016 
15017       // If RepeatMask isn't defined yet we can define it ourselves.
15018       if (RepeatMask[i] < 0)
15019         RepeatMask[i] = M % NumLaneElts;
15020 
15021       if (RepeatMask[i] < NumElts) {
15022         if (RepeatMask[i] != M % NumLaneElts)
15023           return SDValue();
15024         LaneSrcs[Lane][0] = M / NumLaneElts;
15025       } else {
15026         if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15027           return SDValue();
15028         LaneSrcs[Lane][1] = M / NumLaneElts;
15029       }
15030     }
15031 
15032     if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15033       return SDValue();
15034   }
15035 
15036   SmallVector<int, 16> NewMask(NumElts, -1);
15037   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15038     int Src = LaneSrcs[Lane][0];
15039     for (int i = 0; i != NumLaneElts; ++i) {
15040       int M = -1;
15041       if (Src >= 0)
15042         M = Src * NumLaneElts + i;
15043       NewMask[Lane * NumLaneElts + i] = M;
15044     }
15045   }
15046   SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15047   // Ensure we didn't get back the shuffle we started with.
15048   // FIXME: This is a hack to make up for some splat handling code in
15049   // getVectorShuffle.
15050   if (isa<ShuffleVectorSDNode>(NewV1) &&
15051       cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15052     return SDValue();
15053 
15054   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15055     int Src = LaneSrcs[Lane][1];
15056     for (int i = 0; i != NumLaneElts; ++i) {
15057       int M = -1;
15058       if (Src >= 0)
15059         M = Src * NumLaneElts + i;
15060       NewMask[Lane * NumLaneElts + i] = M;
15061     }
15062   }
15063   SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15064   // Ensure we didn't get back the shuffle we started with.
15065   // FIXME: This is a hack to make up for some splat handling code in
15066   // getVectorShuffle.
15067   if (isa<ShuffleVectorSDNode>(NewV2) &&
15068       cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15069     return SDValue();
15070 
15071   for (int i = 0; i != NumElts; ++i) {
15072     if (Mask[i] < 0) {
15073       NewMask[i] = -1;
15074       continue;
15075     }
15076     NewMask[i] = RepeatMask[i % NumLaneElts];
15077     if (NewMask[i] < 0)
15078       continue;
15079 
15080     NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15081   }
15082   return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15083 }
15084 
15085 /// If the input shuffle mask results in a vector that is undefined in all upper
15086 /// or lower half elements and that mask accesses only 2 halves of the
15087 /// shuffle's operands, return true. A mask of half the width with mask indexes
15088 /// adjusted to access the extracted halves of the original shuffle operands is
15089 /// returned in HalfMask. HalfIdx1 and HalfIdx2 identify which half of each
15090 /// input operand is accessed (0/1 = lower/upper V1, 2/3 = lower/upper V2).
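///
/// For example, the 8-element mask <u,u,u,u,12,4,13,5> produces HalfMask
/// <0,4,1,5> with HalfIdx1 == 3 (upper half of V2) and HalfIdx2 == 1 (upper
/// half of V1).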
15091 static bool
15092 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15093                    int &HalfIdx1, int &HalfIdx2) {
15094   assert((Mask.size() == HalfMask.size() * 2) &&
15095          "Expected input mask to be twice as long as output");
15096 
15097   // Exactly one half of the result must be undef to allow narrowing.
15098   bool UndefLower = isUndefLowerHalf(Mask);
15099   bool UndefUpper = isUndefUpperHalf(Mask);
15100   if (UndefLower == UndefUpper)
15101     return false;
15102 
15103   unsigned HalfNumElts = HalfMask.size();
15104   unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15105   HalfIdx1 = -1;
15106   HalfIdx2 = -1;
15107   for (unsigned i = 0; i != HalfNumElts; ++i) {
15108     int M = Mask[i + MaskIndexOffset];
15109     if (M < 0) {
15110       HalfMask[i] = M;
15111       continue;
15112     }
15113 
15114     // Determine which of the 4 half vectors this element is from.
15115     // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15116     int HalfIdx = M / HalfNumElts;
15117 
15118     // Determine the element index into its half vector source.
15119     int HalfElt = M % HalfNumElts;
15120 
15121     // We can shuffle with up to 2 half vectors, set the new 'half'
15122     // shuffle mask accordingly.
15123     if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15124       HalfMask[i] = HalfElt;
15125       HalfIdx1 = HalfIdx;
15126       continue;
15127     }
15128     if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15129       HalfMask[i] = HalfElt + HalfNumElts;
15130       HalfIdx2 = HalfIdx;
15131       continue;
15132     }
15133 
15134     // Too many half vectors referenced.
15135     return false;
15136   }
15137 
15138   return true;
15139 }
15140 
15141 /// Given the output values from getHalfShuffleMask(), create a half width
15142 /// shuffle of extracted vectors followed by an insert back to full width.
15143 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15144                                      ArrayRef<int> HalfMask, int HalfIdx1,
15145                                      int HalfIdx2, bool UndefLower,
15146                                      SelectionDAG &DAG, bool UseConcat = false) {
15147   assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15148   assert(V1.getValueType().isSimple() && "Expecting only simple types");
15149 
15150   MVT VT = V1.getSimpleValueType();
15151   MVT HalfVT = VT.getHalfNumVectorElementsVT();
15152   unsigned HalfNumElts = HalfVT.getVectorNumElements();
15153 
15154   auto getHalfVector = [&](int HalfIdx) {
15155     if (HalfIdx < 0)
15156       return DAG.getUNDEF(HalfVT);
15157     SDValue V = (HalfIdx < 2 ? V1 : V2);
15158     HalfIdx = (HalfIdx % 2) * HalfNumElts;
15159     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15160                        DAG.getIntPtrConstant(HalfIdx, DL));
15161   };
15162 
15163   // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15164   SDValue Half1 = getHalfVector(HalfIdx1);
15165   SDValue Half2 = getHalfVector(HalfIdx2);
15166   SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15167   if (UseConcat) {
15168     SDValue Op0 = V;
15169     SDValue Op1 = DAG.getUNDEF(HalfVT);
15170     if (UndefLower)
15171       std::swap(Op0, Op1);
15172     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15173   }
15174 
15175   unsigned Offset = UndefLower ? HalfNumElts : 0;
15176   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15177                      DAG.getIntPtrConstant(Offset, DL));
15178 }
15179 
15180 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15181 /// This allows for fast cases such as subvector extraction/insertion
15182 /// or shuffling smaller vector types which can lower more efficiently.
15183 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15184                                          SDValue V2, ArrayRef<int> Mask,
15185                                          const X86Subtarget &Subtarget,
15186                                          SelectionDAG &DAG) {
15187   assert((VT.is256BitVector() || VT.is512BitVector()) &&
15188          "Expected 256-bit or 512-bit vector");
15189 
15190   bool UndefLower = isUndefLowerHalf(Mask);
15191   if (!UndefLower && !isUndefUpperHalf(Mask))
15192     return SDValue();
15193 
15194   assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15195          "Completely undef shuffle mask should have been simplified already");
15196 
15197   // Upper half is undef and lower half is whole upper subvector.
15198   // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15199   MVT HalfVT = VT.getHalfNumVectorElementsVT();
15200   unsigned HalfNumElts = HalfVT.getVectorNumElements();
15201   if (!UndefLower &&
15202       isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15203     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15204                              DAG.getIntPtrConstant(HalfNumElts, DL));
15205     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15206                        DAG.getIntPtrConstant(0, DL));
15207   }
15208 
15209   // Lower half is undef and upper half is whole lower subvector.
15210   // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15211   if (UndefLower &&
15212       isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15213     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15214                              DAG.getIntPtrConstant(0, DL));
15215     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15216                        DAG.getIntPtrConstant(HalfNumElts, DL));
15217   }
15218 
15219   int HalfIdx1, HalfIdx2;
15220   SmallVector<int, 8> HalfMask(HalfNumElts);
15221   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15222     return SDValue();
15223 
15224   assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15225 
15226   // Only shuffle the halves of the inputs when useful.
15227   unsigned NumLowerHalves =
15228       (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15229   unsigned NumUpperHalves =
15230       (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15231   assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15232 
15233   // Determine the larger pattern of undef/halves, then decide if it's worth
15234   // splitting the shuffle based on subtarget capabilities and types.
15235   unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15236   if (!UndefLower) {
15237     // XXXXuuuu: no insert is needed.
15238     // Always extract lowers when setting lower - these are all free subreg ops.
15239     if (NumUpperHalves == 0)
15240       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15241                                    UndefLower, DAG);
15242 
15243     if (NumUpperHalves == 1) {
15244       // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15245       if (Subtarget.hasAVX2()) {
15246         // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
15247         if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15248             !is128BitUnpackShuffleMask(HalfMask, DAG) &&
15249             (!isSingleSHUFPSMask(HalfMask) ||
15250              Subtarget.hasFastVariableCrossLaneShuffle()))
15251           return SDValue();
15252         // If this is a unary shuffle (assume that the 2nd operand is
15253         // canonicalized to undef), then we can use vpermpd. Otherwise, we
15254         // are better off extracting the upper half of 1 operand and using a
15255         // narrow shuffle.
15256         if (EltWidth == 64 && V2.isUndef())
15257           return SDValue();
15258       }
15259       // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15260       if (Subtarget.hasAVX512() && VT.is512BitVector())
15261         return SDValue();
15262       // Extract + narrow shuffle is better than the wide alternative.
15263       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15264                                    UndefLower, DAG);
15265     }
15266 
15267     // Don't extract both uppers, instead shuffle and then extract.
15268     assert(NumUpperHalves == 2 && "Half vector count went wrong");
15269     return SDValue();
15270   }
15271 
15272   // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15273   if (NumUpperHalves == 0) {
15274     // AVX2 has efficient 64-bit element cross-lane shuffles.
15275     // TODO: Refine to account for unary shuffle, splat, and other masks?
15276     if (Subtarget.hasAVX2() && EltWidth == 64)
15277       return SDValue();
15278     // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15279     if (Subtarget.hasAVX512() && VT.is512BitVector())
15280       return SDValue();
15281     // Narrow shuffle + insert is better than the wide alternative.
15282     return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15283                                  UndefLower, DAG);
15284   }
15285 
15286   // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15287   return SDValue();
15288 }
15289 
15290 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15291 /// every lane can be represented as the same repeating mask - allowing us to
15292 /// shuffle the sources with the repeating shuffle and then permute the result
15293 /// to the destination lanes.
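///
/// For example, the v8f32 mask <6,7,4,5,2,3,0,1> can be lowered as the
/// lane-repeated shuffle <2,3,0,1,6,7,4,5> followed by the lane permute
/// <4,5,6,7,0,1,2,3>.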
15294 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15295     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15296     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15297   int NumElts = VT.getVectorNumElements();
15298   int NumLanes = VT.getSizeInBits() / 128;
15299   int NumLaneElts = NumElts / NumLanes;
15300 
15301   // On AVX2 we may be able to just shuffle the lowest elements and then
15302   // broadcast the result.
15303   if (Subtarget.hasAVX2()) {
15304     for (unsigned BroadcastSize : {16, 32, 64}) {
15305       if (BroadcastSize <= VT.getScalarSizeInBits())
15306         continue;
15307       int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15308 
15309       // Attempt to match a repeating pattern every NumBroadcastElts,
15310       // accounting for UNDEFs, but only referencing the lowest 128-bit
15311       // lane of the inputs.
15312       auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15313         for (int i = 0; i != NumElts; i += NumBroadcastElts)
15314           for (int j = 0; j != NumBroadcastElts; ++j) {
15315             int M = Mask[i + j];
15316             if (M < 0)
15317               continue;
15318             int &R = RepeatMask[j];
15319             if (0 != ((M % NumElts) / NumLaneElts))
15320               return false;
15321             if (0 <= R && R != M)
15322               return false;
15323             R = M;
15324           }
15325         return true;
15326       };
15327 
15328       SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15329       if (!FindRepeatingBroadcastMask(RepeatMask))
15330         continue;
15331 
15332       // Shuffle the (lowest) repeated elements in place for broadcast.
15333       SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15334 
15335       // Shuffle the actual broadcast.
15336       SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15337       for (int i = 0; i != NumElts; i += NumBroadcastElts)
15338         for (int j = 0; j != NumBroadcastElts; ++j)
15339           BroadcastMask[i + j] = j;
15340 
15341       // Avoid returning the same shuffle operation. For example,
15342       // v8i32 = vector_shuffle<0,1,0,1,0,1,0,1> t5, undef:v8i32
15343       if (BroadcastMask == Mask)
15344         return SDValue();
15345 
15346       return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15347                                   BroadcastMask);
15348     }
15349   }
15350 
15351   // Bail if the shuffle mask doesn't cross 128-bit lanes.
15352   if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15353     return SDValue();
15354 
15355   // Bail if we already have a repeated lane shuffle mask.
15356   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15357     return SDValue();
15358 
15359   // Helper to look for a repeated mask in each split sublane, checking that
15360   // those sublanes can then be permuted into place.
15361   auto ShuffleSubLanes = [&](int SubLaneScale) {
15362     int NumSubLanes = NumLanes * SubLaneScale;
15363     int NumSubLaneElts = NumLaneElts / SubLaneScale;
15364 
15365     // Check that all the sources are coming from the same lane and see if we
15366     // can form a repeating shuffle mask (local to each sub-lane). At the same
15367     // time, determine the source sub-lane for each destination sub-lane.
15368     int TopSrcSubLane = -1;
15369     SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15370     SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
15371         SubLaneScale,
15372         SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
15373 
15374     for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15375       // Extract the sub-lane mask, check that it all comes from the same lane
15376       // and normalize the mask entries to come from the first lane.
15377       int SrcLane = -1;
15378       SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15379       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15380         int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15381         if (M < 0)
15382           continue;
15383         int Lane = (M % NumElts) / NumLaneElts;
15384         if ((0 <= SrcLane) && (SrcLane != Lane))
15385           return SDValue();
15386         SrcLane = Lane;
15387         int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15388         SubLaneMask[Elt] = LocalM;
15389       }
15390 
15391       // Whole sub-lane is UNDEF.
15392       if (SrcLane < 0)
15393         continue;
15394 
15395       // Attempt to match against the candidate repeated sub-lane masks.
15396       for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15397         auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15398           for (int i = 0; i != NumSubLaneElts; ++i) {
15399             if (M1[i] < 0 || M2[i] < 0)
15400               continue;
15401             if (M1[i] != M2[i])
15402               return false;
15403           }
15404           return true;
15405         };
15406 
15407         auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15408         if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15409           continue;
15410 
15411         // Merge the sub-lane mask into the matching repeated sub-lane mask.
15412         for (int i = 0; i != NumSubLaneElts; ++i) {
15413           int M = SubLaneMask[i];
15414           if (M < 0)
15415             continue;
15416           assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15417                  "Unexpected mask element");
15418           RepeatedSubLaneMask[i] = M;
15419         }
15420 
15421         // Track the topmost source sub-lane - by setting the remaining to
15422         // UNDEF we can greatly simplify shuffle matching.
15423         int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15424         TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15425         Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15426         break;
15427       }
15428 
15429       // Bail if we failed to find a matching repeated sub-lane mask.
15430       if (Dst2SrcSubLanes[DstSubLane] < 0)
15431         return SDValue();
15432     }
15433     assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15434            "Unexpected source lane");
15435 
15436     // Create a repeating shuffle mask for the entire vector.
15437     SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15438     for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15439       int Lane = SubLane / SubLaneScale;
15440       auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15441       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15442         int M = RepeatedSubLaneMask[Elt];
15443         if (M < 0)
15444           continue;
15445         int Idx = (SubLane * NumSubLaneElts) + Elt;
15446         RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15447       }
15448     }
15449 
15450     // Shuffle each source sub-lane to its destination.
15451     SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15452     for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15453       int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15454       if (SrcSubLane < 0)
15455         continue;
15456       for (int j = 0; j != NumSubLaneElts; ++j)
15457         SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15458     }
15459 
15460     // Avoid returning the same shuffle operation.
15461     // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
15462     if (RepeatedMask == Mask || SubLaneMask == Mask)
15463       return SDValue();
15464 
15465     SDValue RepeatedShuffle =
15466         DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15467 
15468     return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15469                                 SubLaneMask);
15470   };
15471 
15472   // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15473   // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
15474   // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
15475   // Otherwise we can only permute whole 128-bit lanes.
15476   int MinSubLaneScale = 1, MaxSubLaneScale = 1;
15477   if (Subtarget.hasAVX2() && VT.is256BitVector()) {
15478     bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
15479     MinSubLaneScale = 2;
15480     MaxSubLaneScale =
15481         (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
15482   }
15483   if (Subtarget.hasBWI() && VT == MVT::v64i8)
15484     MinSubLaneScale = MaxSubLaneScale = 4;
15485 
15486   for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
15487     if (SDValue Shuffle = ShuffleSubLanes(Scale))
15488       return Shuffle;
15489 
15490   return SDValue();
15491 }
15492 
15493 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15494                                    bool &ForceV1Zero, bool &ForceV2Zero,
15495                                    unsigned &ShuffleImm, ArrayRef<int> Mask,
15496                                    const APInt &Zeroable) {
15497   int NumElts = VT.getVectorNumElements();
15498   assert(VT.getScalarSizeInBits() == 64 &&
15499          (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15500          "Unexpected data type for VSHUFPD");
15501   assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15502          "Illegal shuffle mask");
15503 
15504   bool ZeroLane[2] = { true, true };
15505   for (int i = 0; i < NumElts; ++i)
15506     ZeroLane[i & 1] &= Zeroable[i];
15507 
15508   // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
15509   // Mask for V4F64: 0/1,  4/5,  2/3,  6/7..
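  // For example, the v4f64 mask <1,4,3,7> matches directly and produces
  // ShuffleImm 0b1101, while <4,1,7,3> only matches after commuting V1 and V2.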
15510   ShuffleImm = 0;
15511   bool ShufpdMask = true;
15512   bool CommutableMask = true;
15513   for (int i = 0; i < NumElts; ++i) {
15514     if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15515       continue;
15516     if (Mask[i] < 0)
15517       return false;
15518     int Val = (i & 6) + NumElts * (i & 1);
15519     int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15520     if (Mask[i] < Val || Mask[i] > Val + 1)
15521       ShufpdMask = false;
15522     if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15523       CommutableMask = false;
15524     ShuffleImm |= (Mask[i] % 2) << i;
15525   }
15526 
15527   if (!ShufpdMask && !CommutableMask)
15528     return false;
15529 
15530   if (!ShufpdMask && CommutableMask)
15531     std::swap(V1, V2);
15532 
15533   ForceV1Zero = ZeroLane[0];
15534   ForceV2Zero = ZeroLane[1];
15535   return true;
15536 }
15537 
15538 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15539                                       SDValue V2, ArrayRef<int> Mask,
15540                                       const APInt &Zeroable,
15541                                       const X86Subtarget &Subtarget,
15542                                       SelectionDAG &DAG) {
15543   assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15544          "Unexpected data type for VSHUFPD");
15545 
15546   unsigned Immediate = 0;
15547   bool ForceV1Zero = false, ForceV2Zero = false;
15548   if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15549                               Mask, Zeroable))
15550     return SDValue();
15551 
15552   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15553   if (ForceV1Zero)
15554     V1 = getZeroVector(VT, Subtarget, DAG, DL);
15555   if (ForceV2Zero)
15556     V2 = getZeroVector(VT, Subtarget, DAG, DL);
15557 
15558   return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15559                      DAG.getTargetConstant(Immediate, DL, MVT::i8));
15560 }
15561 
15562 // Look for {0, 8, 16, 24, 32, 40, 48, 56 } in the first 8 elements, followed
15563 // by zeroable elements in the remaining 24 elements. Turn this into two
15564 // vpmovqb instructions shuffled together.
15565 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15566                                              SDValue V1, SDValue V2,
15567                                              ArrayRef<int> Mask,
15568                                              const APInt &Zeroable,
15569                                              SelectionDAG &DAG) {
15570   assert(VT == MVT::v32i8 && "Unexpected type!");
15571 
15572   // The first 8 indices should be every 8th element.
15573   if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15574     return SDValue();
15575 
15576   // Remaining elements need to be zeroable.
15577   if (Zeroable.countl_one() < (Mask.size() - 8))
15578     return SDValue();
15579 
15580   V1 = DAG.getBitcast(MVT::v4i64, V1);
15581   V2 = DAG.getBitcast(MVT::v4i64, V2);
15582 
15583   V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15584   V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15585 
15586   // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15587   // the upper bits of the result using an unpckldq.
15588   SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15589                                         { 0, 1, 2, 3, 16, 17, 18, 19,
15590                                           4, 5, 6, 7, 20, 21, 22, 23 });
15591   // Insert the unpckldq into a zero vector to widen to v32i8.
15592   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15593                      DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15594                      DAG.getIntPtrConstant(0, DL));
15595 }
15596 
15597 // a = shuffle v1, v2, mask1    ; interleaving lower lanes of v1 and v2
15598 // b = shuffle v1, v2, mask2    ; interleaving higher lanes of v1 and v2
15599 //     =>
15600 // ul = unpckl v1, v2
15601 // uh = unpckh v1, v2
15602 // a = vperm ul, uh
15603 // b = vperm ul, uh
15604 //
15605 // Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
15606 // and permute. We cannot directly match v3 because it is split into two
15607 // 256-bit vectors in earlier isel stages. Therefore, this function matches a
15608 // pair of 256-bit shuffles and makes sure the masks are consecutive.
15609 //
15610 // Once unpck and permute nodes are created, the permute corresponding to this
15611 // shuffle is returned, while the other permute replaces the other half of the
15612 // shuffle in the selection dag.
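//
// For example, for v8f32 the matched pair of masks is <0,8,1,9,2,10,3,11> and
// <4,12,5,13,6,14,7,15>, i.e. the lower and upper halves of a full
// interleave(v1, v2).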
15613 static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
15614                                                  SDValue V1, SDValue V2,
15615                                                  ArrayRef<int> Mask,
15616                                                  SelectionDAG &DAG) {
15617   if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
15618       VT != MVT::v32i8)
15619     return SDValue();
15620   // <B0, B1, B0+1, B1+1, ..., >
15621   auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
15622                                    unsigned Begin1) {
15623     size_t Size = Mask.size();
15624     assert(Size % 2 == 0 && "Expected even mask size");
15625     for (unsigned I = 0; I < Size; I += 2) {
15626       if (Mask[I] != (int)(Begin0 + I / 2) ||
15627           Mask[I + 1] != (int)(Begin1 + I / 2))
15628         return false;
15629     }
15630     return true;
15631   };
15632   // Check which half of the interleave this shuffle node is.
15633   int NumElts = VT.getVectorNumElements();
15634   size_t FirstQtr = NumElts / 2;
15635   size_t ThirdQtr = NumElts + NumElts / 2;
15636   bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
15637   bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
15638   if (!IsFirstHalf && !IsSecondHalf)
15639     return SDValue();
15640 
15641   // Find the intersection between shuffle users of V1 and V2.
15642   SmallVector<SDNode *, 2> Shuffles;
15643   for (SDNode *User : V1->uses())
15644     if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
15645         User->getOperand(1) == V2)
15646       Shuffles.push_back(User);
15647   // Limit user size to two for now.
15648   if (Shuffles.size() != 2)
15649     return SDValue();
15650   // Find out which half of the 512-bit shuffle each smaller shuffle is.
15651   auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
15652   auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
15653   SDNode *FirstHalf;
15654   SDNode *SecondHalf;
15655   if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
15656       IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
15657     FirstHalf = Shuffles[0];
15658     SecondHalf = Shuffles[1];
15659   } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
15660              IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
15661     FirstHalf = Shuffles[1];
15662     SecondHalf = Shuffles[0];
15663   } else {
15664     return SDValue();
15665   }
15666   // Lower into unpck and perm. Return the perm of this shuffle and replace
15667   // the other.
15668   SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
15669   SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
15670   SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15671                               DAG.getTargetConstant(0x20, DL, MVT::i8));
15672   SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15673                               DAG.getTargetConstant(0x31, DL, MVT::i8));
15674   if (IsFirstHalf) {
15675     DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
15676     return Perm1;
15677   }
15678   DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
15679   return Perm2;
15680 }
15681 
15682 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15683 ///
15684 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15685 /// isn't available.
15686 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15687                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15688                                  const X86Subtarget &Subtarget,
15689                                  SelectionDAG &DAG) {
15690   assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15691   assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15692   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15693 
15694   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15695                                      Subtarget, DAG))
15696     return V;
15697 
15698   if (V2.isUndef()) {
15699     // Check for being able to broadcast a single element.
15700     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15701                                                     Mask, Subtarget, DAG))
15702       return Broadcast;
15703 
15704     // Use low duplicate instructions for masks that match their pattern.
15705     if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
15706       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15707 
15708     if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15709       // Non-half-crossing single input shuffles can be lowered with an
15710       // interleaved permutation.
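      // For example, the mask <1,0,3,2> yields VPERMILPMask 0b0101
      // (vpermilpd $5), swapping the two elements within each 128-bit lane.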
15711       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15712                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15713       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15714                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15715     }
15716 
15717     // With AVX2 we have direct support for this permutation.
15718     if (Subtarget.hasAVX2())
15719       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15720                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15721 
15722     // Try to create an in-lane repeating shuffle mask and then shuffle the
15723     // results into the target lanes.
15724     if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15725             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15726       return V;
15727 
15728     // Try to permute the lanes and then use a per-lane permute.
15729     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15730                                                         Mask, DAG, Subtarget))
15731       return V;
15732 
15733     // Otherwise, fall back.
15734     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15735                                                DAG, Subtarget);
15736   }
15737 
15738   // Use dedicated unpack instructions for masks that match their pattern.
15739   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15740     return V;
15741 
15742   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15743                                           Zeroable, Subtarget, DAG))
15744     return Blend;
15745 
15746   // Check if the blend happens to exactly fit that of SHUFPD.
15747   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15748                                           Zeroable, Subtarget, DAG))
15749     return Op;
15750 
15751   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15752   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15753 
15754   // If we have lane crossing shuffles AND they don't all come from the lower
15755   // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15756   // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
15757   // canonicalizes to a blend of splats; this combine doesn't need that.
15758   if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15759       !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15760       (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15761       (V2.getOpcode() != ISD::BUILD_VECTOR))
15762     return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
15763 
15764   // If we have one input in place, then we can permute the other input and
15765   // blend the result.
15766   if (V1IsInPlace || V2IsInPlace)
15767     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15768                                                 Subtarget, DAG);
15769 
15770   // Try to create an in-lane repeating shuffle mask and then shuffle the
15771   // results into the target lanes.
15772   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15773           DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15774     return V;
15775 
15776   // Try to simplify this by merging 128-bit lanes to enable a lane-based
15777   // shuffle. However, if we have AVX2 and either input is already in place,
15778   // we will be able to shuffle the other input even across lanes in a single
15779   // instruction, so skip this pattern.
15780   if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
15781     if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15782             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15783       return V;
15784 
15785   // If we have VLX support, we can use VEXPAND.
15786   if (Subtarget.hasVLX())
15787     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15788                                          DAG, Subtarget))
15789       return V;
15790 
15791   // If we have AVX2 then we always want to lower with a blend because at v4 we
15792   // can fully permute the elements.
15793   if (Subtarget.hasAVX2())
15794     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15795                                                 Subtarget, DAG);
15796 
15797   // Otherwise fall back on generic lowering.
15798   return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15799                                     Subtarget, DAG);
15800 }
15801 
15802 /// Handle lowering of 4-lane 64-bit integer shuffles.
15803 ///
15804 /// This routine is only called when we have AVX2 and thus a reasonable
15805 /// instruction set for v4i64 shuffling.
15806 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15807                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15808                                  const X86Subtarget &Subtarget,
15809                                  SelectionDAG &DAG) {
15810   assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15811   assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15812   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15813   assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15814 
15815   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15816                                      Subtarget, DAG))
15817     return V;
15818 
15819   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15820                                           Zeroable, Subtarget, DAG))
15821     return Blend;
15822 
15823   // Check for being able to broadcast a single element.
15824   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15825                                                   Subtarget, DAG))
15826     return Broadcast;
15827 
15828   // Try to use shift instructions if fast.
15829   if (Subtarget.preferLowerShuffleAsShift())
15830     if (SDValue Shift =
15831             lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15832                                 Subtarget, DAG, /*BitwiseOnly*/ true))
15833       return Shift;
15834 
15835   if (V2.isUndef()) {
15836     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15837     // can use lower latency instructions that will operate on both lanes.
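    // Worked example (illustrative, not from the original comments): a v4i64
    // mask <1, 0, 3, 2> repeats <1, 0> in both 128-bit lanes; splitting each
    // 64-bit index into two 32-bit indices gives the v8i32 PSHUFD mask
    // <2, 3, 0, 1>, i.e. immediate 0x4E, which swaps the 64-bit halves of
    // every 128-bit lane.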
15838     SmallVector<int, 2> RepeatedMask;
15839     if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15840       SmallVector<int, 4> PSHUFDMask;
15841       narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
15842       return DAG.getBitcast(
15843           MVT::v4i64,
15844           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15845                       DAG.getBitcast(MVT::v8i32, V1),
15846                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15847     }
15848 
15849     // AVX2 provides a direct instruction for permuting a single input across
15850     // lanes.
15851     return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15852                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15853   }
15854 
15855   // Try to use shift instructions.
15856   if (SDValue Shift =
15857           lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable, Subtarget,
15858                               DAG, /*BitwiseOnly*/ false))
15859     return Shift;
15860 
15861   // If we have VLX support, we can use VALIGN or VEXPAND.
15862   if (Subtarget.hasVLX()) {
15863     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
15864                                               Zeroable, Subtarget, DAG))
15865       return Rotate;
15866 
15867     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15868                                          DAG, Subtarget))
15869       return V;
15870   }
15871 
15872   // Try to use PALIGNR.
15873   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15874                                                 Subtarget, DAG))
15875     return Rotate;
15876 
15877   // Use dedicated unpack instructions for masks that match their pattern.
15878   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15879     return V;
15880 
15881   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15882   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15883 
15884   // If we have one input in place, then we can permute the other input and
15885   // blend the result.
15886   if (V1IsInPlace || V2IsInPlace)
15887     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15888                                                 Subtarget, DAG);
15889 
15890   // Try to create an in-lane repeating shuffle mask and then shuffle the
15891   // results into the target lanes.
15892   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15893           DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15894     return V;
15895 
15896   // Try to lower to PERMQ(BLENDD(V1,V2)).
15897   if (SDValue V =
15898           lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
15899     return V;
15900 
15901   // Try to simplify this by merging 128-bit lanes to enable a lane-based
15902   // shuffle. However, if we have AVX2 and either input is already in place,
15903   // we will be able to shuffle the other input even across lanes in a single
15904   // instruction, so skip this pattern.
15905   if (!V1IsInPlace && !V2IsInPlace)
15906     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15907             DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15908       return Result;
15909 
15910   // Otherwise fall back on generic blend lowering.
15911   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15912                                               Subtarget, DAG);
15913 }
15914 
15915 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15916 ///
15917 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15918 /// isn't available.
15919 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15920                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15921                                  const X86Subtarget &Subtarget,
15922                                  SelectionDAG &DAG) {
15923   assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15924   assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15925   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15926 
15927   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15928                                           Zeroable, Subtarget, DAG))
15929     return Blend;
15930 
15931   // Check for being able to broadcast a single element.
15932   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15933                                                   Subtarget, DAG))
15934     return Broadcast;
15935 
15936   if (!Subtarget.hasAVX2()) {
15937     SmallVector<int> InLaneMask;
15938     computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
15939 
15940     if (!is128BitLaneRepeatedShuffleMask(MVT::v8f32, InLaneMask))
15941       if (SDValue R = splitAndLowerShuffle(DL, MVT::v8f32, V1, V2, Mask, DAG,
15942                                            /*SimpleOnly*/ true))
15943         return R;
15944   }
15945   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15946                                                    Zeroable, Subtarget, DAG))
15947     return DAG.getBitcast(MVT::v8f32, ZExt);
15948 
15949   // If the shuffle mask is repeated in each 128-bit lane, we have many more
15950   // options to efficiently lower the shuffle.
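  // For instance (illustrative), the mask <1, 0, 3, 2, 5, 4, 7, 6> repeats
  // <1, 0, 3, 2> in both 128-bit lanes, so a single in-lane instruction such
  // as VPERMILPS with immediate 0xB1 covers the whole vector.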
15951   SmallVector<int, 4> RepeatedMask;
15952   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15953     assert(RepeatedMask.size() == 4 &&
15954            "Repeated masks must be half the mask width!");
15955 
15956     // Use even/odd duplicate instructions for masks that match their pattern.
15957     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
15958       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15959     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
15960       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15961 
15962     if (V2.isUndef())
15963       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15964                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15965 
15966     // Use dedicated unpack instructions for masks that match their pattern.
15967     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15968       return V;
15969 
15970     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15971     // have already handled any direct blends.
15972     return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15973   }
15974 
15975   // Try to create an in-lane repeating shuffle mask and then shuffle the
15976   // results into the target lanes.
15977   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15978           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15979     return V;
15980 
15981   // If we have a single input shuffle with different shuffle patterns in the
15982   // two 128-bit lanes use the variable mask to VPERMILPS.
15983   if (V2.isUndef()) {
15984     if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
15985       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15986       return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15987     }
15988     if (Subtarget.hasAVX2()) {
15989       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15990       return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
15991     }
15992     // Otherwise, fall back.
15993     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
15994                                                DAG, Subtarget);
15995   }
15996 
15997   // Try to simplify this by merging 128-bit lanes to enable a lane-based
15998   // shuffle.
15999   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16000           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16001     return Result;
16002 
16003   // If we have VLX support, we can use VEXPAND.
16004   if (Subtarget.hasVLX())
16005     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16006                                          DAG, Subtarget))
16007       return V;
16008 
16009   // Try to match an interleave of two v8f32s and lower them as unpck and
16010   // permutes using ymms. This needs to go before we try to split the vectors.
16011   //
16012   // TODO: Expand this to AVX1. Currently v8i32 is cast to v8f32 and hits
16013   // this path inadvertently.
16014   if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
16015     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
16016                                                       Mask, DAG))
16017       return V;
16018 
16019   // For non-AVX512, if the mask is made up of in-lane 16-bit elements then
16020   // try to split, since after the split we get more efficient code using
16021   // vpunpcklwd and vpunpckhwd instructions than with vblend.
16022   if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
16023     return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
16024                                       DAG);
16025 
16026   // If we have AVX2 then we always want to lower with a blend because at v8 we
16027   // can fully permute the elements.
16028   if (Subtarget.hasAVX2())
16029     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
16030                                                 Subtarget, DAG);
16031 
16032   // Otherwise fall back on generic lowering.
16033   return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16034                                     Subtarget, DAG);
16035 }
16036 
16037 /// Handle lowering of 8-lane 32-bit integer shuffles.
16038 ///
16039 /// This routine is only called when we have AVX2 and thus a reasonable
16040 /// instruction set for v8i32 shuffling.
16041 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16042                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16043                                  const X86Subtarget &Subtarget,
16044                                  SelectionDAG &DAG) {
16045   assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16046   assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16047   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16048   assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16049 
16050   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
16051 
16052   // Whenever we can lower this as a zext, that instruction is strictly faster
16053   // than any alternative. It also allows us to fold memory operands into the
16054   // shuffle in many cases.
16055   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16056                                                    Zeroable, Subtarget, DAG))
16057     return ZExt;
16058 
16059   // Try to match an interleave of two v8i32s and lower them as unpck and
16060   // permutes using ymms. This needs to go before we try to split the vectors.
16061   if (!Subtarget.hasAVX512())
16062     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
16063                                                       Mask, DAG))
16064       return V;
16065 
16066   // For non-AVX512, if the mask is made up of in-lane 16-bit elements then
16067   // try to split, since after the split we get more efficient code than
16068   // vblend by using vpunpcklwd and vpunpckhwd instructions.
16069   if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
16070       !Subtarget.hasAVX512())
16071     return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
16072                                       DAG);
16073 
16074   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16075                                           Zeroable, Subtarget, DAG))
16076     return Blend;
16077 
16078   // Check for being able to broadcast a single element.
16079   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16080                                                   Subtarget, DAG))
16081     return Broadcast;
16082 
16083   // Try to use shift instructions if fast.
16084   if (Subtarget.preferLowerShuffleAsShift()) {
16085     if (SDValue Shift =
16086             lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
16087                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16088       return Shift;
16089     if (NumV2Elements == 0)
16090       if (SDValue Rotate =
16091               lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16092         return Rotate;
16093   }
16094 
16095   // If the shuffle mask is repeated in each 128-bit lane we can use more
16096   // efficient instructions that mirror the shuffles across the two 128-bit
16097   // lanes.
16098   SmallVector<int, 4> RepeatedMask;
16099   bool Is128BitLaneRepeatedShuffle =
16100       is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16101   if (Is128BitLaneRepeatedShuffle) {
16102     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16103     if (V2.isUndef())
16104       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16105                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16106 
16107     // Use dedicated unpack instructions for masks that match their pattern.
16108     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16109       return V;
16110   }
16111 
16112   // Try to use shift instructions.
16113   if (SDValue Shift =
16114           lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget,
16115                               DAG, /*BitwiseOnly*/ false))
16116     return Shift;
16117 
16118   if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements == 0)
16119     if (SDValue Rotate =
16120             lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16121       return Rotate;
16122 
16123   // If we have VLX support, we can use VALIGN or EXPAND.
16124   if (Subtarget.hasVLX()) {
16125     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
16126                                               Zeroable, Subtarget, DAG))
16127       return Rotate;
16128 
16129     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16130                                          DAG, Subtarget))
16131       return V;
16132   }
16133 
16134   // Try to use byte rotation instructions.
16135   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16136                                                 Subtarget, DAG))
16137     return Rotate;
16138 
16139   // Try to create an in-lane repeating shuffle mask and then shuffle the
16140   // results into the target lanes.
16141   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16142           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16143     return V;
16144 
16145   if (V2.isUndef()) {
16146     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16147     // because that should be faster than the variable permute alternatives.
16148     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
16149       return V;
16150 
16151     // If the shuffle patterns aren't repeated but it's a single input, directly
16152     // generate a cross-lane VPERMD instruction.
16153     SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16154     return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16155   }
16156 
16157   // Assume that a single SHUFPS is faster than an alternative sequence of
16158   // multiple instructions (even if the CPU has a domain penalty).
16159   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
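  // Illustrative example (added commentary): the v8i32 mask
  // <0, 1, 8, 9, 4, 5, 12, 13> repeats <0, 1, 4, 5> per 128-bit lane, so both
  // operands are bitcast to v8f32, a single SHUFPS (immediate 0x44 in this
  // case) builds the result, and the result is bitcast back to v8i32.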
16160   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16161     SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16162     SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16163     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16164                                             CastV1, CastV2, DAG);
16165     return DAG.getBitcast(MVT::v8i32, ShufPS);
16166   }
16167 
16168   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16169   // shuffle.
16170   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16171           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16172     return Result;
16173 
16174   // Otherwise fall back on generic blend lowering.
16175   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
16176                                               Subtarget, DAG);
16177 }
16178 
16179 /// Handle lowering of 16-lane 16-bit integer shuffles.
16180 ///
16181 /// This routine is only called when we have AVX2 and thus a reasonable
16182 /// instruction set for v16i16 shuffling.
16183 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16184                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16185                                   const X86Subtarget &Subtarget,
16186                                   SelectionDAG &DAG) {
16187   assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16188   assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16189   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16190   assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16191 
16192   // Whenever we can lower this as a zext, that instruction is strictly faster
16193   // than any alternative. It also allows us to fold memory operands into the
16194   // shuffle in many cases.
16195   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16196           DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16197     return ZExt;
16198 
16199   // Check for being able to broadcast a single element.
16200   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16201                                                   Subtarget, DAG))
16202     return Broadcast;
16203 
16204   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16205                                           Zeroable, Subtarget, DAG))
16206     return Blend;
16207 
16208   // Use dedicated unpack instructions for masks that match their pattern.
16209   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16210     return V;
16211 
16212   // Use dedicated pack instructions for masks that match their pattern.
16213   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16214                                        Subtarget))
16215     return V;
16216 
16217   // Try to lower using a truncation.
16218   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16219                                        Subtarget, DAG))
16220     return V;
16221 
16222   // Try to use shift instructions.
16223   if (SDValue Shift =
16224           lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16225                               Subtarget, DAG, /*BitwiseOnly*/ false))
16226     return Shift;
16227 
16228   // Try to use byte rotation instructions.
16229   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16230                                                 Subtarget, DAG))
16231     return Rotate;
16232 
16233   // Try to create an in-lane repeating shuffle mask and then shuffle the
16234   // results into the target lanes.
16235   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16236           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16237     return V;
16238 
16239   if (V2.isUndef()) {
16240     // Try to use bit rotation instructions.
16241     if (SDValue Rotate =
16242             lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
16243       return Rotate;
16244 
16245     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16246     // because that should be faster than the variable permute alternatives.
16247     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
16248       return V;
16249 
16250     // There are no generalized cross-lane shuffle operations available on i16
16251     // element types.
16252     if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16253       if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16254               DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16255         return V;
16256 
16257       return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16258                                                  DAG, Subtarget);
16259     }
16260 
16261     SmallVector<int, 8> RepeatedMask;
16262     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16263       // As this is a single-input shuffle, the repeated mask should be
16264       // a strictly valid v8i16 mask that we can pass through to the v8i16
16265       // lowering to handle even the v16 case.
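      // E.g. (illustrative) a mask repeating <2, 1, 0, 3, 6, 5, 4, 7> in both
      // 128-bit lanes is handed to the v8i16 lowering once, and the chosen
      // in-lane shuffle applies to both lanes at the same time.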
16266       return lowerV8I16GeneralSingleInputShuffle(
16267           DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16268     }
16269   }
16270 
16271   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16272                                               Zeroable, Subtarget, DAG))
16273     return PSHUFB;
16274 
16275   // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
16276   if (Subtarget.hasBWI())
16277     return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);
16278 
16279   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16280   // shuffle.
16281   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16282           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16283     return Result;
16284 
16285   // Try to permute the lanes and then use a per-lane permute.
16286   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16287           DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16288     return V;
16289 
16290   // Try to match an interleave of two v16i16s and lower them as unpck and
16291   // permutes using ymms.
16292   if (!Subtarget.hasAVX512())
16293     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
16294                                                       Mask, DAG))
16295       return V;
16296 
16297   // Otherwise fall back on generic lowering.
16298   return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16299                                     Subtarget, DAG);
16300 }
16301 
16302 /// Handle lowering of 32-lane 8-bit integer shuffles.
16303 ///
16304 /// This routine is only called when we have AVX2 and thus a reasonable
16305 /// instruction set for v32i8 shuffling.
16306 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16307                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16308                                  const X86Subtarget &Subtarget,
16309                                  SelectionDAG &DAG) {
16310   assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16311   assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16312   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16313   assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16314 
16315   // Whenever we can lower this as a zext, that instruction is strictly faster
16316   // than any alternative. It also allows us to fold memory operands into the
16317   // shuffle in many cases.
16318   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16319                                                    Zeroable, Subtarget, DAG))
16320     return ZExt;
16321 
16322   // Check for being able to broadcast a single element.
16323   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16324                                                   Subtarget, DAG))
16325     return Broadcast;
16326 
16327   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16328                                           Zeroable, Subtarget, DAG))
16329     return Blend;
16330 
16331   // Use dedicated unpack instructions for masks that match their pattern.
16332   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16333     return V;
16334 
16335   // Use dedicated pack instructions for masks that match their pattern.
16336   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16337                                        Subtarget))
16338     return V;
16339 
16340   // Try to lower using a truncation.
16341   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
16342                                        Subtarget, DAG))
16343     return V;
16344 
16345   // Try to use shift instructions.
16346   if (SDValue Shift =
16347           lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget,
16348                               DAG, /*BitwiseOnly*/ false))
16349     return Shift;
16350 
16351   // Try to use byte rotation instructions.
16352   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16353                                                 Subtarget, DAG))
16354     return Rotate;
16355 
16356   // Try to use bit rotation instructions.
16357   if (V2.isUndef())
16358     if (SDValue Rotate =
16359             lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
16360       return Rotate;
16361 
16362   // Try to create an in-lane repeating shuffle mask and then shuffle the
16363   // results into the target lanes.
16364   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16365           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16366     return V;
16367 
16368   // There are no generalized cross-lane shuffle operations available on i8
16369   // element types.
16370   if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16371     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16372     // because that should be faster than the variable permute alternatives.
16373     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
16374       return V;
16375 
16376     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16377             DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16378       return V;
16379 
16380     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16381                                                DAG, Subtarget);
16382   }
16383 
16384   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16385                                               Zeroable, Subtarget, DAG))
16386     return PSHUFB;
16387 
16388   // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
16389   if (Subtarget.hasVBMI())
16390     return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);
16391 
16392   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16393   // shuffle.
16394   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16395           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16396     return Result;
16397 
16398   // Try to permute the lanes and then use a per-lane permute.
16399   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16400           DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16401     return V;
16402 
16403   // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16404   // by zeroable elements in the remaining 24 elements. Turn this into two
16405   // vmovqb instructions shuffled together.
16406   if (Subtarget.hasVLX())
16407     if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16408                                                   Mask, Zeroable, DAG))
16409       return V;
16410 
16411   // Try to match an interleave of two v32i8s and lower them as unpck and
16412   // permutes using ymms.
16413   if (!Subtarget.hasAVX512())
16414     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
16415                                                       Mask, DAG))
16416       return V;
16417 
16418   // Otherwise fall back on generic lowering.
16419   return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16420                                     Subtarget, DAG);
16421 }
16422 
16423 /// High-level routine to lower various 256-bit x86 vector shuffles.
16424 ///
16425 /// This routine either breaks down the specific type of a 256-bit x86 vector
16426 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16427 /// together based on the available instructions.
16428 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16429                                   SDValue V1, SDValue V2, const APInt &Zeroable,
16430                                   const X86Subtarget &Subtarget,
16431                                   SelectionDAG &DAG) {
16432   // If we have a single input to the zero element, insert that into V1 if we
16433   // can do so cheaply.
16434   int NumElts = VT.getVectorNumElements();
16435   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16436 
16437   if (NumV2Elements == 1 && Mask[0] >= NumElts)
16438     if (SDValue Insertion = lowerShuffleAsElementInsertion(
16439             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16440       return Insertion;
16441 
16442   // Handle special cases where the lower or upper half is UNDEF.
16443   if (SDValue V =
16444           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16445     return V;
16446 
16447   // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16448   // can check for those subtargets here and avoid much of the subtarget
16449   // querying in the per-vector-type lowering routines. With AVX1 we have
16450   // essentially *zero* ability to manipulate a 256-bit vector with integer
16451   // types. Since we'll use floating point types there eventually, just
16452   // immediately cast everything to a float and operate entirely in that domain.
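  // For example (illustrative), a v4i64 shuffle without AVX2 is bitcast to
  // v4f64 below, lowered along the v4f64 path, and then bitcast back.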
16453   if (VT.isInteger() && !Subtarget.hasAVX2()) {
16454     int ElementBits = VT.getScalarSizeInBits();
16455     if (ElementBits < 32) {
16456       // No floating point type available; if we can't use the bit operations
16457       // for masking/blending then decompose into 128-bit vectors.
16458       if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16459                                             Subtarget, DAG))
16460         return V;
16461       if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16462         return V;
16463       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
16464     }
16465 
16466     MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16467                                 VT.getVectorNumElements());
16468     V1 = DAG.getBitcast(FpVT, V1);
16469     V2 = DAG.getBitcast(FpVT, V2);
16470     return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16471   }
16472 
16473   if (VT == MVT::v16f16 || VT == MVT::v16bf16) {
16474     V1 = DAG.getBitcast(MVT::v16i16, V1);
16475     V2 = DAG.getBitcast(MVT::v16i16, V2);
16476     return DAG.getBitcast(VT,
16477                           DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
16478   }
16479 
16480   switch (VT.SimpleTy) {
16481   case MVT::v4f64:
16482     return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16483   case MVT::v4i64:
16484     return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16485   case MVT::v8f32:
16486     return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16487   case MVT::v8i32:
16488     return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16489   case MVT::v16i16:
16490     return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16491   case MVT::v32i8:
16492     return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16493 
16494   default:
16495     llvm_unreachable("Not a valid 256-bit x86 vector type!");
16496   }
16497 }
16498 
16499 /// Try to lower a vector shuffle as a 128-bit shuffles.
16500 /// Try to lower a vector shuffle as 128-bit shuffles.
16501                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16502                                   const X86Subtarget &Subtarget,
16503                                   SelectionDAG &DAG) {
16504   assert(VT.getScalarSizeInBits() == 64 &&
16505          "Unexpected element type size for 128bit shuffle.");
16506 
16507   // To handle 256 bit vector requires VLX and most probably
16508   // Handling a 256-bit vector here would require VLX, and most probably the
16509   // function lowerV2X128VectorShuffle() is a better solution for that case.
16510 
16511   // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16512   SmallVector<int, 4> Widened128Mask;
16513   if (!canWidenShuffleElements(Mask, Widened128Mask))
16514     return SDValue();
16515   assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
16516 
16517   // Try to use an insert into a zero vector.
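  // Zeroable has one bit per 64-bit element here, so (illustrative example)
  // Zeroable == 0xfc with Widened128Mask[0] == 0 means elements 2..7 may be
  // zeroed: keep only the low 128 bits of V1 and insert them into an all-zero
  // vector.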
16518   if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16519       (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16520     unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16521     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16522     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16523                               DAG.getIntPtrConstant(0, DL));
16524     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16525                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
16526                        DAG.getIntPtrConstant(0, DL));
16527   }
16528 
16529   // Check for patterns which can be matched with a single insert of a 256-bit
16530   // subvector.
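  // E.g. (illustrative) the mask <0, 1, 2, 3, 8, 9, 10, 11> is V1 with its
  // upper 256 bits replaced by the low 256 bits of V2, while
  // <0, 1, 2, 3, 0, 1, 2, 3> duplicates V1's low 256 bits into both halves.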
16531   bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
16532   if (OnlyUsesV1 ||
16533       isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
16534     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16535     SDValue SubVec =
16536         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
16537                     DAG.getIntPtrConstant(0, DL));
16538     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16539                        DAG.getIntPtrConstant(4, DL));
16540   }
16541 
16542   // See if this is an insertion of the lower 128-bits of V2 into V1.
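  // Illustrative example (added commentary): Widened128Mask <0, 1, 4, 3> keeps
  // V1's 128-bit subvectors 0, 1 and 3 in place and puts V2's low 128 bits at
  // position 2, which the loop below recognizes as a single 128-bit insertion.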
16543   bool IsInsert = true;
16544   int V2Index = -1;
16545   for (int i = 0; i < 4; ++i) {
16546     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16547     if (Widened128Mask[i] < 0)
16548       continue;
16549 
16550     // Make sure all V1 subvectors are in place.
16551     if (Widened128Mask[i] < 4) {
16552       if (Widened128Mask[i] != i) {
16553         IsInsert = false;
16554         break;
16555       }
16556     } else {
16557       // Make sure we only have a single V2 index and its the lowest 128-bits.
16558       // Make sure we only have a single V2 index and it's the lowest 128 bits.
16559         IsInsert = false;
16560         break;
16561       }
16562       V2Index = i;
16563     }
16564   }
16565   if (IsInsert && V2Index >= 0) {
16566     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16567     SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16568                                  DAG.getIntPtrConstant(0, DL));
16569     return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16570   }
16571 
16572   // See if we can widen to a 256-bit lane shuffle, we're going to lose 128-lane
16573   // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-lane
16574   // possible we at least ensure the lanes stay sequential to help later
16575   // combines.
16576   SmallVector<int, 2> Widened256Mask;
16577   if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
16578     Widened128Mask.clear();
16579     narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
16580   }
16581 
16582   // Try to lower to vshuf64x2/vshuf32x4.
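  // Sketch of the encoding (illustrative): SHUF128 takes the two low 128-bit
  // result lanes from Ops[0] and the two high lanes from Ops[1], so each pair
  // of mask entries must come from a single source. E.g. Widened128Mask
  // <0, 2, 4, 6> gives Ops = {V1, V2}, PermMask = <0, 2, 0, 2> and immediate
  // 0x88, selecting the even 128-bit lanes of V1 followed by those of V2.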
16583   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16584   int PermMask[4] = {-1, -1, -1, -1};
16585   // Ensure elements came from the same Op.
16586   for (int i = 0; i < 4; ++i) {
16587     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16588     if (Widened128Mask[i] < 0)
16589       continue;
16590 
16591     SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
16592     unsigned OpIndex = i / 2;
16593     if (Ops[OpIndex].isUndef())
16594       Ops[OpIndex] = Op;
16595     else if (Ops[OpIndex] != Op)
16596       return SDValue();
16597 
16598     PermMask[i] = Widened128Mask[i] % 4;
16599   }
16600 
16601   return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16602                      getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
16603 }
16604 
16605 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16606 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16607                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16608                                  const X86Subtarget &Subtarget,
16609                                  SelectionDAG &DAG) {
16610   assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16611   assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16612   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16613 
16614   if (V2.isUndef()) {
16615     // Use low duplicate instructions for masks that match their pattern.
16616     if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
16617       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16618 
16619     if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16620       // Non-half-crossing single input shuffles can be lowered with an
16621       // interleaved permutation.
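      // E.g. (illustrative) Mask = <1, 0, 3, 2, 5, 4, 7, 6> sets only the
      // even-numbered bits, giving imm = 0x55, which swaps the two doubles in
      // each 128-bit lane.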
16622       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16623                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16624                               ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16625                               ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16626       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16627                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16628     }
16629 
16630     SmallVector<int, 4> RepeatedMask;
16631     if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16632       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16633                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16634   }
16635 
16636   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16637                                            V2, Subtarget, DAG))
16638     return Shuf128;
16639 
16640   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16641     return Unpck;
16642 
16643   // Check if the blend happens to exactly fit that of SHUFPD.
16644   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16645                                           Zeroable, Subtarget, DAG))
16646     return Op;
16647 
16648   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16649                                        DAG, Subtarget))
16650     return V;
16651 
16652   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16653                                           Zeroable, Subtarget, DAG))
16654     return Blend;
16655 
16656   return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
16657 }
16658 
16659 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16660 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16661                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16662                                   const X86Subtarget &Subtarget,
16663                                   SelectionDAG &DAG) {
16664   assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16665   assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16666   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16667 
16668   // If the shuffle mask is repeated in each 128-bit lane, we have many more
16669   // options to efficiently lower the shuffle.
16670   SmallVector<int, 4> RepeatedMask;
16671   if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16672     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16673 
16674     // Use even/odd duplicate instructions for masks that match their pattern.
16675     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
16676       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16677     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
16678       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16679 
16680     if (V2.isUndef())
16681       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16682                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16683 
16684     // Use dedicated unpack instructions for masks that match their pattern.
16685     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16686       return V;
16687 
16688     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16689                                             Zeroable, Subtarget, DAG))
16690       return Blend;
16691 
16692     // Otherwise, fall back to a SHUFPS sequence.
16693     return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16694   }
16695 
16696   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16697                                           Zeroable, Subtarget, DAG))
16698     return Blend;
16699 
16700   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16701           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16702     return DAG.getBitcast(MVT::v16f32, ZExt);
16703 
16704   // Try to create an in-lane repeating shuffle mask and then shuffle the
16705   // results into the target lanes.
16706   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16707           DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
16708     return V;
16709 
16710   // If we have a single input shuffle with different shuffle patterns in the
16711   // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
16712   if (V2.isUndef() &&
16713       !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16714     SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16715     return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16716   }
16717 
16718   // If we have AVX512F support, we can use VEXPAND.
16719   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16720                                              V1, V2, DAG, Subtarget))
16721     return V;
16722 
16723   return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
16724 }
16725 
16726 /// Handle lowering of 8-lane 64-bit integer shuffles.
16727 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16728                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16729                                  const X86Subtarget &Subtarget,
16730                                  SelectionDAG &DAG) {
16731   assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16732   assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16733   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16734 
16735   // Try to use shift instructions if fast.
16736   if (Subtarget.preferLowerShuffleAsShift())
16737     if (SDValue Shift =
16738             lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable,
16739                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16740       return Shift;
16741 
16742   if (V2.isUndef()) {
16743     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
16744     // can use lower latency instructions that will operate on all four
16745     // 128-bit lanes.
16746     SmallVector<int, 2> Repeated128Mask;
16747     if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16748       SmallVector<int, 4> PSHUFDMask;
16749       narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
16750       return DAG.getBitcast(
16751           MVT::v8i64,
16752           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16753                       DAG.getBitcast(MVT::v16i32, V1),
16754                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16755     }
16756 
16757     SmallVector<int, 4> Repeated256Mask;
16758     if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16759       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16760                          getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16761   }
16762 
16763   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16764                                            V2, Subtarget, DAG))
16765     return Shuf128;
16766 
16767   // Try to use shift instructions.
16768   if (SDValue Shift =
16769           lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable, Subtarget,
16770                               DAG, /*BitwiseOnly*/ false))
16771     return Shift;
16772 
16773   // Try to use VALIGN.
16774   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
16775                                             Zeroable, Subtarget, DAG))
16776     return Rotate;
16777 
16778   // Try to use PALIGNR.
16779   if (Subtarget.hasBWI())
16780     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16781                                                   Subtarget, DAG))
16782       return Rotate;
16783 
16784   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16785     return Unpck;
16786 
16787   // If we have AVX512F support, we can use VEXPAND.
16788   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16789                                        DAG, Subtarget))
16790     return V;
16791 
16792   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16793                                           Zeroable, Subtarget, DAG))
16794     return Blend;
16795 
16796   return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
16797 }
16798 
16799 /// Handle lowering of 16-lane 32-bit integer shuffles.
16800 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16801                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16802                                   const X86Subtarget &Subtarget,
16803                                   SelectionDAG &DAG) {
16804   assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16805   assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16806   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16807 
16808   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
16809 
16810   // Whenever we can lower this as a zext, that instruction is strictly faster
16811   // than any alternative. It also allows us to fold memory operands into the
16812   // shuffle in many cases.
16813   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16814           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16815     return ZExt;
16816 
16817   // Try to use shift instructions if fast.
16818   if (Subtarget.preferLowerShuffleAsShift()) {
16819     if (SDValue Shift =
16820             lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16821                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16822       return Shift;
16823     if (NumV2Elements == 0)
16824       if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask,
16825                                                    Subtarget, DAG))
16826         return Rotate;
16827   }
16828 
16829   // If the shuffle mask is repeated in each 128-bit lane we can use more
16830   // efficient instructions that mirror the shuffles across the four 128-bit
16831   // lanes.
16832   SmallVector<int, 4> RepeatedMask;
16833   bool Is128BitLaneRepeatedShuffle =
16834       is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16835   if (Is128BitLaneRepeatedShuffle) {
16836     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16837     if (V2.isUndef())
16838       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16839                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16840 
16841     // Use dedicated unpack instructions for masks that match their pattern.
16842     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16843       return V;
16844   }
16845 
16846   // Try to use shift instructions.
16847   if (SDValue Shift =
16848           lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16849                               Subtarget, DAG, /*BitwiseOnly*/ false))
16850     return Shift;
16851 
16852   if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements != 0)
16853     if (SDValue Rotate =
16854             lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask, Subtarget, DAG))
16855       return Rotate;
16856 
16857   // Try to use VALIGN.
16858   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
16859                                             Zeroable, Subtarget, DAG))
16860     return Rotate;
16861 
16862   // Try to use byte rotation instructions.
16863   if (Subtarget.hasBWI())
16864     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16865                                                   Subtarget, DAG))
16866       return Rotate;
16867 
16868   // Assume that a single SHUFPS is faster than using a permv shuffle.
16869   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16870   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16871     SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16872     SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16873     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16874                                             CastV1, CastV2, DAG);
16875     return DAG.getBitcast(MVT::v16i32, ShufPS);
16876   }
16877 
16878   // Try to create an in-lane repeating shuffle mask and then shuffle the
16879   // results into the target lanes.
16880   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16881           DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
16882     return V;
16883 
16884   // If we have AVX512F support, we can use VEXPAND.
16885   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16886                                        DAG, Subtarget))
16887     return V;
16888 
16889   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16890                                           Zeroable, Subtarget, DAG))
16891     return Blend;
16892 
16893   return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
16894 }
16895 
16896 /// Handle lowering of 32-lane 16-bit integer shuffles.
16897 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16898                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16899                                   const X86Subtarget &Subtarget,
16900                                   SelectionDAG &DAG) {
16901   assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16902   assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16903   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16904   assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16905 
16906   // Whenever we can lower this as a zext, that instruction is strictly faster
16907   // than any alternative. It also allows us to fold memory operands into the
16908   // shuffle in many cases.
16909   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16910           DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16911     return ZExt;
16912 
16913   // Use dedicated unpack instructions for masks that match their pattern.
16914   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16915     return V;
16916 
16917   // Use dedicated pack instructions for masks that match their pattern.
16918   if (SDValue V =
16919           lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
16920     return V;
16921 
16922   // Try to use shift instructions.
16923   if (SDValue Shift =
16924           lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask, Zeroable,
16925                               Subtarget, DAG, /*BitwiseOnly*/ false))
16926     return Shift;
16927 
16928   // Try to use byte rotation instructions.
16929   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16930                                                 Subtarget, DAG))
16931     return Rotate;
16932 
16933   if (V2.isUndef()) {
16934     // Try to use bit rotation instructions.
16935     if (SDValue Rotate =
16936             lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
16937       return Rotate;
16938 
16939     SmallVector<int, 8> RepeatedMask;
16940     if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16941       // As this is a single-input shuffle, the repeated mask should be
16942       // a strictly valid v8i16 mask that we can pass through to the v8i16
16943       // lowering to handle even the v32 case.
16944       return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
16945                                                  RepeatedMask, Subtarget, DAG);
16946     }
16947   }
16948 
16949   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16950                                           Zeroable, Subtarget, DAG))
16951     return Blend;
16952 
16953   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16954                                               Zeroable, Subtarget, DAG))
16955     return PSHUFB;
16956 
16957   return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
16958 }
16959 
16960 /// Handle lowering of 64-lane 8-bit integer shuffles.
16961 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16962                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16963                                  const X86Subtarget &Subtarget,
16964                                  SelectionDAG &DAG) {
16965   assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16966   assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16967   assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16968   assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16969 
16970   // Whenever we can lower this as a zext, that instruction is strictly faster
16971   // than any alternative. It also allows us to fold memory operands into the
16972   // shuffle in many cases.
16973   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16974           DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16975     return ZExt;
16976 
16977   // Use dedicated unpack instructions for masks that match their pattern.
16978   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16979     return V;
16980 
16981   // Use dedicated pack instructions for masks that match their pattern.
16982   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16983                                        Subtarget))
16984     return V;
16985 
16986   // Try to use shift instructions.
16987   if (SDValue Shift =
16988           lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget,
16989                               DAG, /*BitwiseOnly*/ false))
16990     return Shift;
16991 
16992   // Try to use byte rotation instructions.
16993   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16994                                                 Subtarget, DAG))
16995     return Rotate;
16996 
16997   // Try to use bit rotation instructions.
16998   if (V2.isUndef())
16999     if (SDValue Rotate =
17000             lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
17001       return Rotate;
17002 
17003   // Lower as AND if possible.
17004   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
17005                                              Zeroable, Subtarget, DAG))
17006     return Masked;
17007 
17008   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
17009                                               Zeroable, Subtarget, DAG))
17010     return PSHUFB;
17011 
17012   // Try to create an in-lane repeating shuffle mask and then shuffle the
17013   // results into the target lanes.
17014   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17015           DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17016     return V;
17017 
17018   if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
17019           DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
17020     return Result;
17021 
17022   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
17023                                           Zeroable, Subtarget, DAG))
17024     return Blend;
17025 
17026   if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
17027     // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
17028     // PALIGNR will be cheaper than the second PSHUFB+OR.
17029     if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
17030                                                        Mask, Subtarget, DAG))
17031       return V;
17032 
17033     // If we can't directly blend but can use PSHUFB, that will be better as it
17034     // can both shuffle and set up the inefficient blend.
17035     bool V1InUse, V2InUse;
17036     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
17037                                         DAG, V1InUse, V2InUse);
17038   }
17039 
17040   // Try to simplify this by merging 128-bit lanes to enable a lane-based
17041   // shuffle.
17042   if (!V2.isUndef())
17043     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
17044             DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17045       return Result;
17046 
17047   // VBMI can use VPERMV/VPERMV3 byte shuffles.
17048   if (Subtarget.hasVBMI())
17049     return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
17050 
17051   return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17052 }
17053 
17054 /// High-level routine to lower various 512-bit x86 vector shuffles.
17055 ///
17056 /// This routine either breaks down the specific type of a 512-bit x86 vector
17057 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
17058 /// together based on the available instructions.
17059 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17060                                   MVT VT, SDValue V1, SDValue V2,
17061                                   const APInt &Zeroable,
17062                                   const X86Subtarget &Subtarget,
17063                                   SelectionDAG &DAG) {
17064   assert(Subtarget.hasAVX512() &&
17065          "Cannot lower 512-bit vectors w/ basic ISA!");
17066 
17067   // If we have a single input to the zero element, insert that into V1 if we
17068   // can do so cheaply.
17069   int NumElts = Mask.size();
17070   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17071 
17072   if (NumV2Elements == 1 && Mask[0] >= NumElts)
17073     if (SDValue Insertion = lowerShuffleAsElementInsertion(
17074             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
17075       return Insertion;
17076 
17077   // Handle special cases where the lower or upper half is UNDEF.
17078   if (SDValue V =
17079           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
17080     return V;
17081 
17082   // Check for being able to broadcast a single element.
17083   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
17084                                                   Subtarget, DAG))
17085     return Broadcast;
17086 
17087   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
17088     // Try using bit ops for masking and blending before falling back to
17089     // splitting.
17090     if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
17091                                           Subtarget, DAG))
17092       return V;
17093     if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
17094       return V;
17095 
17096     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17097   }
17098 
17099   if (VT == MVT::v32f16) {
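  // There are no dedicated v32f16 shuffle instructions; without BWI split the
  // shuffle, otherwise bitcast to v32i16 and reuse the integer lowering.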
17100     if (!Subtarget.hasBWI())
17101       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
17102                                   /*SimpleOnly*/ false);
17103 
17104     V1 = DAG.getBitcast(MVT::v32i16, V1);
17105     V2 = DAG.getBitcast(MVT::v32i16, V2);
17106     return DAG.getBitcast(MVT::v32f16,
17107                           DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
17108   }
17109 
17110   // Dispatch to each element type for lowering. If we don't have support for
17111   // specific element type shuffles at 512 bits, immediately split them and
17112   // lower them. Each lowering routine of a given type is allowed to assume that
17113   // the requisite ISA extensions for that element type are available.
17114   switch (VT.SimpleTy) {
17115   case MVT::v8f64:
17116     return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17117   case MVT::v16f32:
17118     return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17119   case MVT::v8i64:
17120     return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17121   case MVT::v16i32:
17122     return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17123   case MVT::v32i16:
17124     return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17125   case MVT::v64i8:
17126     return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17127 
17128   default:
17129     llvm_unreachable("Not a valid 512-bit x86 vector type!");
17130   }
17131 }
17132 
17133 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17134                                          MVT VT, SDValue V1, SDValue V2,
17135                                          const X86Subtarget &Subtarget,
17136                                          SelectionDAG &DAG) {
17137   // Shuffle should be unary.
17138   if (!V2.isUndef())
17139     return SDValue();
17140 
17141   int ShiftAmt = -1;
17142   int NumElts = Mask.size();
17143   for (int i = 0; i != NumElts; ++i) {
17144     int M = Mask[i];
17145     assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17146            "Unexpected mask index.");
17147     if (M < 0)
17148       continue;
17149 
17150     // The first non-undef element determines our shift amount.
17151     if (ShiftAmt < 0) {
17152       ShiftAmt = M - i;
17153       // Need to be shifting right.
17154       if (ShiftAmt <= 0)
17155         return SDValue();
17156     }
17157     // All non-undef elements must shift by the same amount.
17158     if (ShiftAmt != M - i)
17159       return SDValue();
17160   }
17161   assert(ShiftAmt >= 0 && "All undef?");
17162 
17163   // Great, we found a shift right.
17164   SDValue Res = widenMaskVector(V1, false, Subtarget, DAG, DL);
17165   Res = DAG.getNode(X86ISD::KSHIFTR, DL, Res.getValueType(), Res,
17166                     DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17167   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17168                      DAG.getIntPtrConstant(0, DL));
17169 }
17170 
17171 // Determine if this shuffle can be implemented with a KSHIFT instruction.
17172 // Returns the shift amount if possible or -1 if not. This is a simplified
17173 // version of matchShuffleAsShift.
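// For example, with MaskOffset 0, an 8-element mask <2,3,4,5,6,7,z,z> whose
// top two elements are zeroable matches X86ISD::KSHIFTR with a shift amount
// of 2.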
17174 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17175                                     int MaskOffset, const APInt &Zeroable) {
17176   int Size = Mask.size();
17177 
17178   auto CheckZeros = [&](int Shift, bool Left) {
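  // Check that the elements shifted in are zeroable: the low Shift elements
  // for a left shift, or the high Shift elements for a right shift.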
17179     for (int j = 0; j < Shift; ++j)
17180       if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17181         return false;
17182 
17183     return true;
17184   };
17185 
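  // Check that the remaining positions form a sequential run taken from the
  // operand selected by MaskOffset.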
17186   auto MatchShift = [&](int Shift, bool Left) {
17187     unsigned Pos = Left ? Shift : 0;
17188     unsigned Low = Left ? 0 : Shift;
17189     unsigned Len = Size - Shift;
17190     return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
17191   };
17192 
17193   for (int Shift = 1; Shift != Size; ++Shift)
17194     for (bool Left : {true, false})
17195       if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17196         Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17197         return Shift;
17198       }
17199 
17200   return -1;
17201 }
17202 
17204 // Lower vXi1 vector shuffles.
17205 // There is no dedicated instruction on AVX-512 that shuffles the masks.
17206 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17207 // vector, shuffle it, and then truncate it back.
17208 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17209                                 MVT VT, SDValue V1, SDValue V2,
17210                                 const APInt &Zeroable,
17211                                 const X86Subtarget &Subtarget,
17212                                 SelectionDAG &DAG) {
17213   assert(Subtarget.hasAVX512() &&
17214          "Cannot lower 512-bit vectors w/o basic ISA!");
17215 
17216   int NumElts = Mask.size();
17217 
17218   // Try to recognize shuffles that are just padding a subvector with zeros.
17219   int SubvecElts = 0;
17220   int Src = -1;
17221   for (int i = 0; i != NumElts; ++i) {
17222     if (Mask[i] >= 0) {
17223       // Grab the source from the first valid mask. All subsequent elements need
17224       // to use this same source.
17225       if (Src < 0)
17226         Src = Mask[i] / NumElts;
17227       if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17228         break;
17229     }
17230 
17231     ++SubvecElts;
17232   }
17233   assert(SubvecElts != NumElts && "Identity shuffle?");
17234 
17235   // Clip to a power 2.
17236   // Clip to a power of 2.
17237 
17238   // Make sure the number of zeroable bits at the top at least covers the
17239   // number of bits not covered by the subvector.
17240   if ((int)Zeroable.countl_one() >= (NumElts - SubvecElts)) {
17241     assert(Src >= 0 && "Expected a source!");
17242     MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17243     SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17244                                   Src == 0 ? V1 : V2,
17245                                   DAG.getIntPtrConstant(0, DL));
17246     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17247                        DAG.getConstant(0, DL, VT),
17248                        Extract, DAG.getIntPtrConstant(0, DL));
17249   }
17250 
17251   // Try a simple shift right with undef elements. Later we'll try with zeros.
17252   if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17253                                                 DAG))
17254     return Shift;
17255 
17256   // Try to match KSHIFTs.
17257   unsigned Offset = 0;
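  // Offset biases the expected mask indices: 0 while matching against V1,
  // NumElts while matching against V2.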
17258   for (SDValue V : { V1, V2 }) {
17259     unsigned Opcode;
17260     int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17261     if (ShiftAmt >= 0) {
17262       SDValue Res = widenMaskVector(V, false, Subtarget, DAG, DL);
17263       MVT WideVT = Res.getSimpleValueType();
17264       // Widened right shifts need two shifts to ensure we shift in zeroes.
17265       if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17266         int WideElts = WideVT.getVectorNumElements();
17267         // Shift left to put the original vector in the MSBs of the new size.
17268         Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17269                           DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17270         // Increase the shift amount to account for the left shift.
17271         ShiftAmt += WideElts - NumElts;
17272       }
17273 
17274       Res = DAG.getNode(Opcode, DL, WideVT, Res,
17275                         DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17276       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17277                          DAG.getIntPtrConstant(0, DL));
17278     }
17279     Offset += NumElts; // Increment for next iteration.
17280   }
17281 
17282   // If we're broadcasting a SETCC result, try to broadcast the ops instead.
17283   // TODO: What other unary shuffles would benefit from this?
17284   if (isBroadcastShuffleMask(Mask) && V1.getOpcode() == ISD::SETCC &&
17285       V1->hasOneUse()) {
17286     SDValue Op0 = V1.getOperand(0);
17287     SDValue Op1 = V1.getOperand(1);
17288     ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
17289     EVT OpVT = Op0.getValueType();
17290     return DAG.getSetCC(
17291         DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
17292         DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
17293   }
17294 
17295   MVT ExtVT;
17296   switch (VT.SimpleTy) {
17297   default:
17298     llvm_unreachable("Expected a vector of i1 elements");
17299   case MVT::v2i1:
17300     ExtVT = MVT::v2i64;
17301     break;
17302   case MVT::v4i1:
17303     ExtVT = MVT::v4i32;
17304     break;
17305   case MVT::v8i1:
17306     // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
17307     // shuffle.
17308     ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17309     break;
17310   case MVT::v16i1:
17311     // Take 512-bit type, unless we are avoiding 512-bit types and have the
17312     // 256-bit operation available.
17313     ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17314     break;
17315   case MVT::v32i1:
17316     // Take 512-bit type, unless we are avoiding 512-bit types and have the
17317     // 256-bit operation available.
17318     assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17319     ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17320     break;
17321   case MVT::v64i1:
17322     // Fall back to scalarization. FIXME: We can do better if the shuffle
17323     // can be partitioned cleanly.
17324     if (!Subtarget.useBWIRegs())
17325       return SDValue();
17326     ExtVT = MVT::v64i8;
17327     break;
17328   }
17329 
17330   V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17331   V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17332 
17333   SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17334   // i1 was sign extended we can use X86ISD::CVT2MASK.
17335   // i1 was sign extended, so we can rebuild the mask with a signed compare.
17336   if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17337       (Subtarget.hasDQI() && (NumElems < 32)))
17338     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17339                        Shuffle, ISD::SETGT);
17340 
17341   return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17342 }
17343 
17344 /// Helper function that returns true if the shuffle mask should be
17345 /// commuted to improve canonicalization.
17346 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17347   int NumElements = Mask.size();
17348 
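  // Count how many mask elements reference each operand; undef (negative)
  // entries are ignored.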
17349   int NumV1Elements = 0, NumV2Elements = 0;
17350   for (int M : Mask)
17351     if (M < 0)
17352       continue;
17353     else if (M < NumElements)
17354       ++NumV1Elements;
17355     else
17356       ++NumV2Elements;
17357 
17358   // Commute the shuffle as needed such that more elements come from V1 than
17359   // V2. This allows us to match the shuffle pattern strictly on how many
17360   // elements come from V1 without handling the symmetric cases.
17361   if (NumV2Elements > NumV1Elements)
17362     return true;
17363 
17364   assert(NumV1Elements > 0 && "No V1 indices");
17365 
17366   if (NumV2Elements == 0)
17367     return false;
17368 
17369   // When the number of V1 and V2 elements is the same, try to minimize the
17370   // number of uses of V2 in the low half of the vector. When that is tied,
17371   // ensure that the sum of indices for V1 is equal to or lower than the sum
17372   // of indices for V2. When those are equal, try to ensure that the number of
17373   // odd indices for V1 is lower than the number of odd indices for V2.
17374   if (NumV1Elements == NumV2Elements) {
17375     int LowV1Elements = 0, LowV2Elements = 0;
17376     for (int M : Mask.slice(0, NumElements / 2))
17377       if (M >= NumElements)
17378         ++LowV2Elements;
17379       else if (M >= 0)
17380         ++LowV1Elements;
17381     if (LowV2Elements > LowV1Elements)
17382       return true;
17383     if (LowV2Elements == LowV1Elements) {
17384       int SumV1Indices = 0, SumV2Indices = 0;
17385       for (int i = 0, Size = Mask.size(); i < Size; ++i)
17386         if (Mask[i] >= NumElements)
17387           SumV2Indices += i;
17388         else if (Mask[i] >= 0)
17389           SumV1Indices += i;
17390       if (SumV2Indices < SumV1Indices)
17391         return true;
17392       if (SumV2Indices == SumV1Indices) {
17393         int NumV1OddIndices = 0, NumV2OddIndices = 0;
17394         for (int i = 0, Size = Mask.size(); i < Size; ++i)
17395           if (Mask[i] >= NumElements)
17396             NumV2OddIndices += i % 2;
17397           else if (Mask[i] >= 0)
17398             NumV1OddIndices += i % 2;
17399         if (NumV2OddIndices < NumV1OddIndices)
17400           return true;
17401       }
17402     }
17403   }
17404 
17405   return false;
17406 }
17407 
17408 static bool canCombineAsMaskOperation(SDValue V,
17409                                       const X86Subtarget &Subtarget) {
17410   if (!Subtarget.hasAVX512())
17411     return false;
17412 
17413   if (!V.getValueType().isSimple())
17414     return false;
17415 
17416   MVT VT = V.getSimpleValueType().getScalarType();
17417   if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
17418     return false;
17419 
17420   // If vec width < 512, widen i8/i16 even with BWI as blendd/blendps/blendpd
17421   // are preferable to blendw/blendvb/masked-mov.
17422   if ((VT == MVT::i16 || VT == MVT::i8) &&
17423       V.getSimpleValueType().getSizeInBits() < 512)
17424     return false;
17425 
17426   auto HasMaskOperation = [&](SDValue V) {
17427     // TODO: Currently we only check limited opcode. We probably extend
17428     // TODO: Currently we only check a limited set of opcodes. We could
17429     // probably extend this to all binary operations by checking TLI.isBinOp().
17430     default:
17431       return false;
17432     case ISD::ADD:
17433     case ISD::SUB:
17434     case ISD::AND:
17435     case ISD::XOR:
17436     case ISD::OR:
17437     case ISD::SMAX:
17438     case ISD::SMIN:
17439     case ISD::UMAX:
17440     case ISD::UMIN:
17441     case ISD::ABS:
17442     case ISD::SHL:
17443     case ISD::SRL:
17444     case ISD::SRA:
17445     case ISD::MUL:
17446       break;
17447     }
17448     if (!V->hasOneUse())
17449       return false;
17450 
17451     return true;
17452   };
17453 
17454   if (HasMaskOperation(V))
17455     return true;
17456 
17457   return false;
17458 }
17459 
17460 // Forward declaration.
17461 static SDValue canonicalizeShuffleMaskWithHorizOp(
17462     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
17463     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
17464     const X86Subtarget &Subtarget);
17465 
17466 /// Top-level lowering for x86 vector shuffles.
17467 ///
17468 /// This handles decomposition, canonicalization, and lowering of all x86
17469 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17470 /// above in helper routines. The canonicalization attempts to widen shuffles
17471 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17472 /// s.t. only one of the two inputs needs to be tested, etc.
17473 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17474                                    SelectionDAG &DAG) {
17475   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17476   ArrayRef<int> OrigMask = SVOp->getMask();
17477   SDValue V1 = Op.getOperand(0);
17478   SDValue V2 = Op.getOperand(1);
17479   MVT VT = Op.getSimpleValueType();
17480   int NumElements = VT.getVectorNumElements();
17481   SDLoc DL(Op);
17482   bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17483 
17484   assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17485          "Can't lower MMX shuffles");
17486 
17487   bool V1IsUndef = V1.isUndef();
17488   bool V2IsUndef = V2.isUndef();
17489   if (V1IsUndef && V2IsUndef)
17490     return DAG.getUNDEF(VT);
17491 
17492   // When we create a shuffle node we put the UNDEF node in the second operand,
17493   // but in some cases the first operand may be transformed to UNDEF.
17494   // In this case we should just commute the node.
17495   if (V1IsUndef)
17496     return DAG.getCommutedVectorShuffle(*SVOp);
17497 
17498   // Check for non-undef masks pointing at an undef vector and make the masks
17499   // undef as well. This makes it easier to match the shuffle based solely on
17500   // the mask.
17501   if (V2IsUndef &&
17502       any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17503     SmallVector<int, 8> NewMask(OrigMask);
17504     for (int &M : NewMask)
17505       if (M >= NumElements)
17506         M = -1;
17507     return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17508   }
17509 
17510   // Check for illegal shuffle mask element index values.
17511   int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17512   (void)MaskUpperLimit;
17513   assert(llvm::all_of(OrigMask,
17514                       [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17515          "Out of bounds shuffle index");
17516 
17517   // We actually see shuffles that are entirely re-arrangements of a set of
17518   // zero inputs. This mostly happens while decomposing complex shuffles into
17519   // simple ones. Directly lower these as a buildvector of zeros.
17520   APInt KnownUndef, KnownZero;
17521   computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17522 
17523   APInt Zeroable = KnownUndef | KnownZero;
17524   if (Zeroable.isAllOnes())
17525     return getZeroVector(VT, Subtarget, DAG, DL);
17526 
17527   bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17528 
17529   // Try to collapse shuffles into using a vector type with fewer elements but
17530   // wider element types. We cap this to not form integers or floating point
17531   // elements wider than 64 bits. It does not seem beneficial to form i128
17532   // integers to handle flipping the low and high halves of AVX 256-bit vectors.
17533   SmallVector<int, 16> WidenedMask;
17534   if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17535       !canCombineAsMaskOperation(V1, Subtarget) &&
17536       !canCombineAsMaskOperation(V2, Subtarget) &&
17537       canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17538     // Shuffle mask widening should not interfere with a broadcast opportunity
17539     // by obfuscating the operands with bitcasts.
17540     // TODO: Avoid lowering directly from this top-level function: make this
17541     // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17542     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17543                                                     Subtarget, DAG))
17544       return Broadcast;
17545 
17546     MVT NewEltVT = VT.isFloatingPoint()
17547                        ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17548                        : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17549     int NewNumElts = NumElements / 2;
17550     MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17551     // Make sure that the new vector type is legal. For example, v2f64 isn't
17552     // legal on SSE1.
17553     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17554       if (V2IsZero) {
17555         // Modify the new Mask to take all zeros from the all-zero vector.
17556         // Choose indices that are blend-friendly.
17557         bool UsedZeroVector = false;
17558         assert(is_contained(WidenedMask, SM_SentinelZero) &&
17559                "V2's non-undef elements are used?!");
17560         for (int i = 0; i != NewNumElts; ++i)
17561           if (WidenedMask[i] == SM_SentinelZero) {
17562             WidenedMask[i] = i + NewNumElts;
17563             UsedZeroVector = true;
17564           }
17565         // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17566         // some elements to be undef.
17567         if (UsedZeroVector)
17568           V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17569       }
17570       V1 = DAG.getBitcast(NewVT, V1);
17571       V2 = DAG.getBitcast(NewVT, V2);
17572       return DAG.getBitcast(
17573           VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17574     }
17575   }
17576 
17577   SmallVector<SDValue> Ops = {V1, V2};
17578   SmallVector<int> Mask(OrigMask);
17579 
17580   // Canonicalize the shuffle with any horizontal ops inputs.
17581   // NOTE: This may update Ops and Mask.
17582   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
17583           Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
17584     return DAG.getBitcast(VT, HOp);
17585 
17586   V1 = DAG.getBitcast(VT, Ops[0]);
17587   V2 = DAG.getBitcast(VT, Ops[1]);
17588   assert(NumElements == (int)Mask.size() &&
17589          "canonicalizeShuffleMaskWithHorizOp "
17590          "shouldn't alter the shuffle mask size");
17591 
17592   // Commute the shuffle if it will improve canonicalization.
17593   if (canonicalizeShuffleMaskWithCommute(Mask)) {
17594     ShuffleVectorSDNode::commuteMask(Mask);
17595     std::swap(V1, V2);
17596   }
17597 
17598   // For each vector width, delegate to a specialized lowering routine.
17599   if (VT.is128BitVector())
17600     return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17601 
17602   if (VT.is256BitVector())
17603     return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17604 
17605   if (VT.is512BitVector())
17606     return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17607 
17608   if (Is1BitVector)
17609     return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17610 
17611   llvm_unreachable("Unimplemented!");
17612 }
17613 
17614 /// Try to lower a VSELECT instruction to a vector shuffle.
17615 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17616                                            const X86Subtarget &Subtarget,
17617                                            SelectionDAG &DAG) {
17618   SDValue Cond = Op.getOperand(0);
17619   SDValue LHS = Op.getOperand(1);
17620   SDValue RHS = Op.getOperand(2);
17621   MVT VT = Op.getSimpleValueType();
17622 
17623   // Only non-legal VSELECTs reach this lowering; convert those into generic
17624   // shuffles and re-use the shuffle lowering path for blends.
17625   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
17626     SmallVector<int, 32> Mask;
17627     if (createShuffleMaskFromVSELECT(Mask, Cond))
17628       return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17629   }
17630 
17631   return SDValue();
17632 }
17633 
17634 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17635   SDValue Cond = Op.getOperand(0);
17636   SDValue LHS = Op.getOperand(1);
17637   SDValue RHS = Op.getOperand(2);
17638 
17639   SDLoc dl(Op);
17640   MVT VT = Op.getSimpleValueType();
17641   if (isSoftF16(VT, Subtarget)) {
17642     MVT NVT = VT.changeVectorElementTypeToInteger();
17643     return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
17644                                           DAG.getBitcast(NVT, LHS),
17645                                           DAG.getBitcast(NVT, RHS)));
17646   }
17647 
17648   // A vselect where all conditions and data are constants can be optimized into
17649   // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17650   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17651       ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17652       ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17653     return SDValue();
17654 
17655   // Try to lower this to a blend-style vector shuffle. This can handle all
17656   // constant condition cases.
17657   if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17658     return BlendOp;
17659 
17660   // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17661   // with patterns on the mask registers on AVX-512.
17662   MVT CondVT = Cond.getSimpleValueType();
17663   unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17664   if (CondEltSize == 1)
17665     return Op;
17666 
17667   // Variable blends are only legal from SSE4.1 onward.
17668   if (!Subtarget.hasSSE41())
17669     return SDValue();
17670 
17671   unsigned EltSize = VT.getScalarSizeInBits();
17672   unsigned NumElts = VT.getVectorNumElements();
17673 
17674   // Expand v32i16/v64i8 without BWI.
17675   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
17676     return SDValue();
17677 
17678   // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17679   // into an i1 condition so that we can use the mask-based 512-bit blend
17680   // instructions.
17681   if (VT.getSizeInBits() == 512) {
17682     // Build a mask by testing the condition against zero.
17683     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17684     SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17685                                 DAG.getConstant(0, dl, CondVT),
17686                                 ISD::SETNE);
17687     // Now return a new VSELECT using the mask.
17688     return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17689   }
17690 
17691   // SEXT/TRUNC cases where the mask doesn't match the destination size.
17692   if (CondEltSize != EltSize) {
17693     // If we don't have a sign splat, rely on the expansion.
17694     if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17695       return SDValue();
17696 
17697     MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17698     MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17699     Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17700     return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17701   }
17702 
17703   // Only some types will be legal on some subtargets. If we can emit a legal
17704   // VSELECT-matching blend, return Op; but if we need to expand, return
17705   // a null value.
17706   switch (VT.SimpleTy) {
17707   default:
17708     // Most of the vector types have blends past SSE4.1.
17709     return Op;
17710 
17711   case MVT::v32i8:
17712     // The byte blends for AVX vectors were introduced only in AVX2.
17713     if (Subtarget.hasAVX2())
17714       return Op;
17715 
17716     return SDValue();
17717 
17718   case MVT::v8i16:
17719   case MVT::v16i16: {
17720     // Bitcast everything to the vXi8 type and use a vXi8 vselect.
17721     MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17722     Cond = DAG.getBitcast(CastVT, Cond);
17723     LHS = DAG.getBitcast(CastVT, LHS);
17724     RHS = DAG.getBitcast(CastVT, RHS);
17725     SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17726     return DAG.getBitcast(VT, Select);
17727   }
17728   }
17729 }
17730 
17731 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17732   MVT VT = Op.getSimpleValueType();
17733   SDValue Vec = Op.getOperand(0);
17734   SDValue Idx = Op.getOperand(1);
17735   assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
17736   SDLoc dl(Op);
17737 
17738   if (!Vec.getSimpleValueType().is128BitVector())
17739     return SDValue();
17740 
17741   if (VT.getSizeInBits() == 8) {
17742     // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
17743     // we're going to zero extend the register or fold the store.
17744     if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
17745         !X86::mayFoldIntoStore(Op))
17746       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
17747                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17748                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
17749 
17750     unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17751     SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
17752                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17753     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17754   }
17755 
17756   if (VT == MVT::f32) {
17757     // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17758     // the result back to an FR32 register. It's only worth matching if the
17759     // result has a single use which is a store or a bitcast to i32. And in
17760     // the case of a store, it's not worth it if the index is a constant 0,
17761     // because a MOVSSmr can be used instead, which is smaller and faster.
17762     if (!Op.hasOneUse())
17763       return SDValue();
17764     SDNode *User = *Op.getNode()->use_begin();
17765     if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
17766         (User->getOpcode() != ISD::BITCAST ||
17767          User->getValueType(0) != MVT::i32))
17768       return SDValue();
17769     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17770                                   DAG.getBitcast(MVT::v4i32, Vec), Idx);
17771     return DAG.getBitcast(MVT::f32, Extract);
17772   }
17773 
17774   if (VT == MVT::i32 || VT == MVT::i64)
17775       return Op;
17776 
17777   return SDValue();
17778 }
17779 
17780 /// Extract one bit from a mask vector, like v16i1 or v8i1.
17781 /// AVX-512 feature.
17782 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17783                                         const X86Subtarget &Subtarget) {
17784   SDValue Vec = Op.getOperand(0);
17785   SDLoc dl(Vec);
17786   MVT VecVT = Vec.getSimpleValueType();
17787   SDValue Idx = Op.getOperand(1);
17788   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17789   MVT EltVT = Op.getSimpleValueType();
17790 
17791   assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17792          "Unexpected vector type in ExtractBitFromMaskVector");
17793 
17794   // A variable index can't be handled in mask registers,
17795   // so extend the vector to VR512/128.
17796   if (!IdxC) {
17797     unsigned NumElts = VecVT.getVectorNumElements();
17798     // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
17799     // than extending to 128/256-bit.
17800     if (NumElts == 1) {
17801       Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17802       MVT IntVT = MVT::getIntegerVT(Vec.getValueType().getVectorNumElements());
17803       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, DAG.getBitcast(IntVT, Vec));
17804     }
17805     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17806     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17807     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17808     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17809     return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17810   }
17811 
17812   unsigned IdxVal = IdxC->getZExtValue();
17813   if (IdxVal == 0) // the operation is legal
17814     return Op;
17815 
17816   // Extend to natively supported kshift.
17817   Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17818 
17819   // Use kshiftr instruction to move to the lower element.
17820   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
17821                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17822 
17823   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17824                      DAG.getIntPtrConstant(0, dl));
17825 }
17826 
17827 // Helper to find all the extracted elements from a vector.
17828 static APInt getExtractedDemandedElts(SDNode *N) {
17829   MVT VT = N->getSimpleValueType(0);
17830   unsigned NumElts = VT.getVectorNumElements();
17831   APInt DemandedElts = APInt::getZero(NumElts);
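  // Walk the users: constant-index extracts demand a single element, bitcasts
  // recurse into their own users, and anything else conservatively demands all
  // elements.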
17832   for (SDNode *User : N->uses()) {
17833     switch (User->getOpcode()) {
17834     case X86ISD::PEXTRB:
17835     case X86ISD::PEXTRW:
17836     case ISD::EXTRACT_VECTOR_ELT:
17837       if (!isa<ConstantSDNode>(User->getOperand(1))) {
17838         DemandedElts.setAllBits();
17839         return DemandedElts;
17840       }
17841       DemandedElts.setBit(User->getConstantOperandVal(1));
17842       break;
17843     case ISD::BITCAST: {
17844       if (!User->getValueType(0).isSimple() ||
17845           !User->getValueType(0).isVector()) {
17846         DemandedElts.setAllBits();
17847         return DemandedElts;
17848       }
17849       APInt DemandedSrcElts = getExtractedDemandedElts(User);
17850       DemandedElts |= APIntOps::ScaleBitMask(DemandedSrcElts, NumElts);
17851       break;
17852     }
17853     default:
17854       DemandedElts.setAllBits();
17855       return DemandedElts;
17856     }
17857   }
17858   return DemandedElts;
17859 }
17860 
17861 SDValue
17862 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17863                                            SelectionDAG &DAG) const {
17864   SDLoc dl(Op);
17865   SDValue Vec = Op.getOperand(0);
17866   MVT VecVT = Vec.getSimpleValueType();
17867   SDValue Idx = Op.getOperand(1);
17868   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17869 
17870   if (VecVT.getVectorElementType() == MVT::i1)
17871     return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17872 
17873   if (!IdxC) {
17874     // It's more profitable to go through memory (1 cycle throughput)
17875     // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17876     // The IACA tool was used to get the performance estimates
17877     // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17878     //
17879     // example : extractelement <16 x i8> %a, i32 %i
17880     //
17881     // Block Throughput: 3.00 Cycles
17882     // Throughput Bottleneck: Port5
17883     //
17884     // | Num Of |   Ports pressure in cycles  |    |
17885     // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
17886     // ---------------------------------------------
17887     // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
17888     // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
17889     // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
17890     // Total Num Of Uops: 4
17891     //
17892     //
17893     // Block Throughput: 1.00 Cycles
17894     // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17895     //
17896     // |    |  Ports pressure in cycles   |  |
17897     // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
17898     // ---------------------------------------------------------
17899     // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17900     // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
17901     // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
17902     // Total Num Of Uops: 4
17903 
17904     return SDValue();
17905   }
17906 
17907   unsigned IdxVal = IdxC->getZExtValue();
17908 
17909   // If this is a 256-bit vector result, first extract the 128-bit vector and
17910   // then extract the element from the 128-bit vector.
17911   if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17912     // Get the 128-bit vector.
17913     Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17914     MVT EltVT = VecVT.getVectorElementType();
17915 
17916     unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17917     assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17918 
17919     // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17920     // this can be done with a mask.
17921     IdxVal &= ElemsPerChunk - 1;
17922     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17923                        DAG.getIntPtrConstant(IdxVal, dl));
17924   }
17925 
17926   assert(VecVT.is128BitVector() && "Unexpected vector length");
17927 
17928   MVT VT = Op.getSimpleValueType();
17929 
17930   if (VT == MVT::i16) {
17931     // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17932     // we're going to zero extend the register or fold the store (SSE41 only).
17933     if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
17934         !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
17935       if (Subtarget.hasFP16())
17936         return Op;
17937 
17938       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17939                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17940                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
17941     }
17942 
17943     SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
17944                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17945     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17946   }
17947 
17948   if (Subtarget.hasSSE41())
17949     if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17950       return Res;
17951 
17952   // Only extract a single element from a v16i8 source - determine the common
17953   // DWORD/WORD that all extractions share, and extract the sub-byte.
17954   // TODO: Add QWORD MOVQ extraction?
17955   if (VT == MVT::i8) {
17956     APInt DemandedElts = getExtractedDemandedElts(Vec.getNode());
17957     assert(DemandedElts.getBitWidth() == 16 && "Vector width mismatch");
17958 
17959     // Extract either the lowest i32 or any i16, and extract the sub-byte.
17960     int DWordIdx = IdxVal / 4;
17961     if (DWordIdx == 0 && DemandedElts == (DemandedElts & 15)) {
17962       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17963                                 DAG.getBitcast(MVT::v4i32, Vec),
17964                                 DAG.getIntPtrConstant(DWordIdx, dl));
17965       int ShiftVal = (IdxVal % 4) * 8;
17966       if (ShiftVal != 0)
17967         Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17968                           DAG.getConstant(ShiftVal, dl, MVT::i8));
17969       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17970     }
17971 
17972     int WordIdx = IdxVal / 2;
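    // Otherwise check whether all demanded bytes lie within a single 16-bit
    // word, so we can extract that word and shift out the requested byte.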
17973     if (DemandedElts == (DemandedElts & (3 << (WordIdx * 2)))) {
17974       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17975                                 DAG.getBitcast(MVT::v8i16, Vec),
17976                                 DAG.getIntPtrConstant(WordIdx, dl));
17977       int ShiftVal = (IdxVal % 2) * 8;
17978       if (ShiftVal != 0)
17979         Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17980                           DAG.getConstant(ShiftVal, dl, MVT::i8));
17981       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17982     }
17983   }
17984 
17985   if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
17986     if (IdxVal == 0)
17987       return Op;
17988 
17989     // Shuffle the element to the lowest element, then movss or movsh.
17990     SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
17991     Mask[0] = static_cast<int>(IdxVal);
17992     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17993     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17994                        DAG.getIntPtrConstant(0, dl));
17995   }
17996 
17997   if (VT.getSizeInBits() == 64) {
17998     // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17999     // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
18000     //        to match extract_elt for f64.
18001     if (IdxVal == 0)
18002       return Op;
18003 
18004     // UNPCKHPD the element to the lowest double word, then movsd.
18005     // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
18006     // Note that if the lower 64 bits of the UNPCKHPD result are then stored
18007     // to an f64mem, the whole operation is folded into a single MOVHPDmr.
18008     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18009     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18010                        DAG.getIntPtrConstant(0, dl));
18011   }
18012 
18013   return SDValue();
18014 }
18015 
18016 /// Insert one bit into a mask vector, like v16i1 or v8i1.
18017 /// AVX-512 feature.
18018 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
18019                                      const X86Subtarget &Subtarget) {
18020   SDLoc dl(Op);
18021   SDValue Vec = Op.getOperand(0);
18022   SDValue Elt = Op.getOperand(1);
18023   SDValue Idx = Op.getOperand(2);
18024   MVT VecVT = Vec.getSimpleValueType();
18025 
18026   if (!isa<ConstantSDNode>(Idx)) {
18027     // Non-constant index. Extend the source and destination,
18028     // insert the element, and then truncate the result.
18029     unsigned NumElts = VecVT.getVectorNumElements();
18030     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18031     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18032     SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
18033       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
18034       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
18035     return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
18036   }
18037 
18038   // Copy into a k-register, extract to v1i1 and insert_subvector.
18039   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
18040   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
18041 }
18042 
18043 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
18044                                                   SelectionDAG &DAG) const {
18045   MVT VT = Op.getSimpleValueType();
18046   MVT EltVT = VT.getVectorElementType();
18047   unsigned NumElts = VT.getVectorNumElements();
18048   unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
18049 
18050   if (EltVT == MVT::i1)
18051     return InsertBitToMaskVector(Op, DAG, Subtarget);
18052 
18053   SDLoc dl(Op);
18054   SDValue N0 = Op.getOperand(0);
18055   SDValue N1 = Op.getOperand(1);
18056   SDValue N2 = Op.getOperand(2);
18057   auto *N2C = dyn_cast<ConstantSDNode>(N2);
18058 
18059   if (EltVT == MVT::bf16) {
18060     MVT IVT = VT.changeVectorElementTypeToInteger();
18061     SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVT,
18062                               DAG.getBitcast(IVT, N0),
18063                               DAG.getBitcast(MVT::i16, N1), N2);
18064     return DAG.getBitcast(VT, Res);
18065   }
18066 
18067   if (!N2C) {
18068     // Variable insertion indices: usually we're better off spilling to stack,
18069     // but AVX512 can use a variable compare+select by comparing against all
18070     // possible vector indices, and FP insertion has less gpr->simd traffic.
18071     if (!(Subtarget.hasBWI() ||
18072           (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
18073           (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
18074       return SDValue();
18075 
18076     MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
18077     MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
18078     if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
18079       return SDValue();
18080 
18081     SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
18082     SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
18083     SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
18084 
18085     SmallVector<SDValue, 16> RawIndices;
18086     for (unsigned I = 0; I != NumElts; ++I)
18087       RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
18088     SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
18089 
18090     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
18091     return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
18092                            ISD::CondCode::SETEQ);
18093   }
18094 
18095   if (N2C->getAPIntValue().uge(NumElts))
18096     return SDValue();
18097   uint64_t IdxVal = N2C->getZExtValue();
18098 
18099   bool IsZeroElt = X86::isZeroNode(N1);
18100   bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
18101 
18102   if (IsZeroElt || IsAllOnesElt) {
18103     // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
18104     // We don't deal with i8 0 since it appears to be handled elsewhere.
18105     if (IsAllOnesElt &&
18106         ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
18107          ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
18108       SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
18109       SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
18110       SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
18111       CstVectorElts[IdxVal] = OnesCst;
18112       SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
18113       return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
18114     }
18115     // See if we can do this more efficiently with a blend shuffle with a
18116     // rematerializable vector.
18117     if (Subtarget.hasSSE41() &&
18118         (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
18119       SmallVector<int, 8> BlendMask;
18120       for (unsigned i = 0; i != NumElts; ++i)
18121         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18122       SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
18123                                     : getOnesVector(VT, DAG, dl);
18124       return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
18125     }
18126   }
18127 
18128   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
18129   // into that, and then insert the subvector back into the result.
18130   if (VT.is256BitVector() || VT.is512BitVector()) {
18131     // With a 256-bit vector, we can insert into the zero element efficiently
18132     // using a blend if we have AVX or AVX2 and the right data type.
18133     if (VT.is256BitVector() && IdxVal == 0) {
18134       // TODO: It is worthwhile to cast integer to floating point and back
18135       // and incur a domain crossing penalty if that's what we'll end up
18136       // doing anyway after extracting to a 128-bit vector.
18137       if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
18138           (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
18139         SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18140         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
18141                            DAG.getTargetConstant(1, dl, MVT::i8));
18142       }
18143     }
18144 
18145     unsigned NumEltsIn128 = 128 / EltSizeInBits;
18146     assert(isPowerOf2_32(NumEltsIn128) &&
18147            "Vectors will always have power-of-two number of elements.");
18148 
18149     // If we are not inserting into the low 128-bit vector chunk,
18150     // then prefer the broadcast+blend sequence.
18151     // FIXME: relax the profitability check iff all N1 uses are insertions.
18152     if (IdxVal >= NumEltsIn128 &&
18153         ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
18154          (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
18155           X86::mayFoldLoad(N1, Subtarget)))) {
18156       SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
18157       SmallVector<int, 8> BlendMask;
18158       for (unsigned i = 0; i != NumElts; ++i)
18159         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18160       return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
18161     }
18162 
18163     // Get the desired 128-bit vector chunk.
18164     SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
18165 
18166     // Insert the element into the desired chunk.
18167     // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
18168     unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
18169 
18170     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
18171                     DAG.getIntPtrConstant(IdxIn128, dl));
18172 
18173     // Insert the changed part back into the bigger vector
18174     return insert128BitVector(N0, V, IdxVal, DAG, dl);
18175   }
18176   assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
18177 
18178   // This will be just movw/movd/movq/movsh/movss/movsd.
18179   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
18180     if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
18181         EltVT == MVT::f16 || EltVT == MVT::i64) {
18182       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18183       return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18184     }
18185 
18186     // We can't directly insert an i8 or i16 into a vector, so zero extend
18187     // it to i32 first.
18188     if (EltVT == MVT::i16 || EltVT == MVT::i8) {
18189       N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
18190       MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
18191       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
18192       N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18193       return DAG.getBitcast(VT, N1);
18194     }
18195   }
18196 
18197   // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
18198   // argument. SSE4.1 is required for pinsrb.
18199   if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
18200     unsigned Opc;
18201     if (VT == MVT::v8i16) {
18202       assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
18203       Opc = X86ISD::PINSRW;
18204     } else {
18205       assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
18206       assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
18207       Opc = X86ISD::PINSRB;
18208     }
18209 
18210     assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
18211     N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
18212     N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
18213     return DAG.getNode(Opc, dl, VT, N0, N1, N2);
18214   }
18215 
18216   if (Subtarget.hasSSE41()) {
18217     if (EltVT == MVT::f32) {
18218       // Bits [7:6] of the constant are the source select. This will always be
18219       //   zero here. The DAG Combiner may combine an extract_elt index into
18220       //   these bits. For example (insert (extract, 3), 2) could be matched by
18221       //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
18222       // Bits [5:4] of the constant are the destination select. This is the
18223       //   value of the incoming immediate.
18224       // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
18225       //   combine either bitwise AND or insert of float 0.0 to set these bits.
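      // For example, with IdxVal == 2 the INSERTPS immediate built below is
      //   (2 << 4) == 0x20: source select 0 (the scalar sits in lane 0 after
      //   SCALAR_TO_VECTOR), destination select 2, and an empty zero mask.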
18226 
18227       bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
18228       if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
18229         // If this is an insertion of 32-bits into the low 32-bits of
18230         // a vector, we prefer to generate a blend with immediate rather
18231         // than an insertps. Blends are simpler operations in hardware and so
18232         // will always have equal or better performance than insertps.
18233         // But if optimizing for size and there's a load folding opportunity,
18234         // generate insertps because blendps does not have a 32-bit memory
18235         // operand form.
18236         N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18237         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
18238                            DAG.getTargetConstant(1, dl, MVT::i8));
18239       }
18240       // Create this as a scalar-to-vector.
18241       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18242       return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
18243                          DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
18244     }
18245 
18246     // PINSR* works with constant index.
18247     if (EltVT == MVT::i32 || EltVT == MVT::i64)
18248       return Op;
18249   }
18250 
18251   return SDValue();
18252 }
18253 
18254 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
18255                                      SelectionDAG &DAG) {
18256   SDLoc dl(Op);
18257   MVT OpVT = Op.getSimpleValueType();
18258 
18259   // It's always cheaper to replace an xor+movd with xorps, and it simplifies
18260   // further combines.
18261   if (X86::isZeroNode(Op.getOperand(0)))
18262     return getZeroVector(OpVT, Subtarget, DAG, dl);
18263 
18264   // If this is a 256-bit vector result, first insert into a 128-bit
18265   // vector and then insert into the 256-bit vector.
18266   if (!OpVT.is128BitVector()) {
18267     // Insert into a 128-bit vector.
18268     unsigned SizeFactor = OpVT.getSizeInBits() / 128;
18269     MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
18270                                  OpVT.getVectorNumElements() / SizeFactor);
18271 
18272     Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
18273 
18274     // Insert the 128-bit vector.
18275     return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
18276   }
18277   assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
18278          "Expected an SSE type!");
18279 
18280   // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
18281   // tblgen.
18282   if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
18283     return Op;
18284 
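  // For example, a (v16i8 scalar_to_vector i8:%x) reaches this point: %x is
  // any-extended to i32, placed into a v4i32 with SCALAR_TO_VECTOR, and the
  // result is bitcast back to v16i8 (the upper elements stay undefined).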
18285   SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
18286   return DAG.getBitcast(
18287       OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
18288 }
18289 
18290 // Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
18291 // simple superregister reference or explicit instructions to insert
18292 // the upper bits of a vector.
18293 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18294                                      SelectionDAG &DAG) {
18295   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
18296 
18297   return insert1BitVector(Op, DAG, Subtarget);
18298 }
18299 
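// Custom lowering for vXi1 EXTRACT_SUBVECTOR with a non-zero index: widen the
// mask to a legal kregister type, shift the wanted bits down to the LSB with
// KSHIFTR, and take the low subvector. For example, extracting a v2i1 at
// index 2 from a v8i1 mask becomes a KSHIFTR by 2 followed by a v2i1 extract
// at index 0.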
18300 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18301                                       SelectionDAG &DAG) {
18302   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
18303          "Only vXi1 extract_subvectors need custom lowering");
18304 
18305   SDLoc dl(Op);
18306   SDValue Vec = Op.getOperand(0);
18307   uint64_t IdxVal = Op.getConstantOperandVal(1);
18308 
18309   if (IdxVal == 0) // the operation is legal
18310     return Op;
18311 
18312   // Extend to natively supported kshift.
18313   Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
18314 
18315   // Shift to the LSB.
18316   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
18317                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18318 
18319   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18320                      DAG.getIntPtrConstant(0, dl));
18321 }
18322 
18323 // Returns the appropriate wrapper opcode for a global reference.
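// Roughly, a plain Wrapper is selected to absolute addressing (e.g. the
// MOV32ri mentioned below), while WrapperRIP is selected to RIP-relative
// addressing; which one is usable depends on the PIC style and the operand
// flags checked here.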
18324 unsigned X86TargetLowering::getGlobalWrapperKind(
18325     const GlobalValue *GV, const unsigned char OpFlags) const {
18326   // References to absolute symbols are never PC-relative.
18327   if (GV && GV->isAbsoluteSymbolRef())
18328     return X86ISD::Wrapper;
18329 
18330   // The following OpFlags under RIP-rel PIC use RIP.
18331   if (Subtarget.isPICStyleRIPRel() &&
18332       (OpFlags == X86II::MO_NO_FLAG || OpFlags == X86II::MO_COFFSTUB ||
18333        OpFlags == X86II::MO_DLLIMPORT))
18334     return X86ISD::WrapperRIP;
18335 
18336   // GOTPCREL references must always use RIP.
18337   if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
18338     return X86ISD::WrapperRIP;
18339 
18340   return X86ISD::Wrapper;
18341 }
18342 
18343 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18344 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
18345 // one of the above-mentioned nodes. It has to be wrapped because otherwise
18346 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18347 // be used to form an addressing mode. These wrapped nodes will be selected
18348 // into MOV32ri.
18349 SDValue
18350 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18351   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18352 
18353   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18354   // global base reg.
18355   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18356 
18357   auto PtrVT = getPointerTy(DAG.getDataLayout());
18358   SDValue Result = DAG.getTargetConstantPool(
18359       CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
18360   SDLoc DL(CP);
18361   Result =
18362       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18363   // With PIC, the address is actually $g + Offset.
18364   if (OpFlag) {
18365     Result =
18366         DAG.getNode(ISD::ADD, DL, PtrVT,
18367                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18368   }
18369 
18370   return Result;
18371 }
18372 
18373 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18374   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18375 
18376   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18377   // global base reg.
18378   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18379 
18380   auto PtrVT = getPointerTy(DAG.getDataLayout());
18381   SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18382   SDLoc DL(JT);
18383   Result =
18384       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18385 
18386   // With PIC, the address is actually $g + Offset.
18387   if (OpFlag)
18388     Result =
18389         DAG.getNode(ISD::ADD, DL, PtrVT,
18390                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18391 
18392   return Result;
18393 }
18394 
18395 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18396                                                SelectionDAG &DAG) const {
18397   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18398 }
18399 
18400 SDValue
18401 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18402   // Create the TargetBlockAddressAddress node.
18403   unsigned char OpFlags =
18404     Subtarget.classifyBlockAddressReference();
18405   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18406   int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18407   SDLoc dl(Op);
18408   auto PtrVT = getPointerTy(DAG.getDataLayout());
18409   SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18410   Result =
18411       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlags), dl, PtrVT, Result);
18412 
18413   // With PIC, the address is actually $g + Offset.
18414   if (isGlobalRelativeToPICBase(OpFlags)) {
18415     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18416                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18417   }
18418 
18419   return Result;
18420 }
18421 
18422 /// Creates target global address or external symbol nodes for calls or
18423 /// other uses.
18424 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18425                                                  bool ForCall) const {
18426   // Unpack the global address or external symbol.
18427   const SDLoc &dl = SDLoc(Op);
18428   const GlobalValue *GV = nullptr;
18429   int64_t Offset = 0;
18430   const char *ExternalSym = nullptr;
18431   if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18432     GV = G->getGlobal();
18433     Offset = G->getOffset();
18434   } else {
18435     const auto *ES = cast<ExternalSymbolSDNode>(Op);
18436     ExternalSym = ES->getSymbol();
18437   }
18438 
18439   // Calculate some flags for address lowering.
18440   const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18441   unsigned char OpFlags;
18442   if (ForCall)
18443     OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18444   else
18445     OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18446   bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18447   bool NeedsLoad = isGlobalStubReference(OpFlags);
18448 
18449   CodeModel::Model M = DAG.getTarget().getCodeModel();
18450   auto PtrVT = getPointerTy(DAG.getDataLayout());
18451   SDValue Result;
18452 
18453   if (GV) {
18454     // Create a target global address if this is a global. If possible, fold the
18455     // offset into the global address reference. Otherwise, ADD it on later.
18456     // Suppress the folding if Offset is negative: movl foo-1, %eax is not
18457     // allowed because if the address of foo is 0, the ELF R_X86_64_32
18458     // relocation will compute to a negative value, which is invalid.
18459     int64_t GlobalOffset = 0;
18460     if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
18461         X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
18462       std::swap(GlobalOffset, Offset);
18463     }
18464     Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18465   } else {
18466     // If this is not a global address, this must be an external symbol.
18467     Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18468   }
18469 
18470   // If this is a direct call, avoid the wrapper if we don't need to do any
18471   // loads or adds. This allows SDAG ISel to match direct calls.
18472   if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18473     return Result;
18474 
18475   Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18476 
18477   // With PIC, the address is actually $g + Offset.
18478   if (HasPICReg) {
18479     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18480                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18481   }
18482 
18483   // For globals that require a load from a stub to get the address, emit the
18484   // load.
18485   if (NeedsLoad)
18486     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18487                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18488 
18489   // If there was a non-zero offset that we didn't fold, create an explicit
18490   // addition for it.
18491   if (Offset != 0)
18492     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18493                          DAG.getConstant(Offset, dl, PtrVT));
18494 
18495   return Result;
18496 }
18497 
18498 SDValue
18499 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18500   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18501 }
18502 
18503 static SDValue
18504 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18505            SDValue *InGlue, const EVT PtrVT, unsigned ReturnReg,
18506            unsigned char OperandFlags, bool LocalDynamic = false) {
18507   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18508   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18509   SDLoc dl(GA);
18510   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18511                                            GA->getValueType(0),
18512                                            GA->getOffset(),
18513                                            OperandFlags);
18514 
18515   X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18516                                            : X86ISD::TLSADDR;
18517 
18518   if (InGlue) {
18519     SDValue Ops[] = { Chain,  TGA, *InGlue };
18520     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18521   } else {
18522     SDValue Ops[]  = { Chain, TGA };
18523     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18524   }
18525 
18526   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
18527   MFI.setAdjustsStack(true);
18528   MFI.setHasCalls(true);
18529 
18530   SDValue Glue = Chain.getValue(1);
18531   return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
18532 }
18533 
18534 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18535 static SDValue
18536 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18537                                 const EVT PtrVT) {
18538   SDValue InGlue;
18539   SDLoc dl(GA);  // ? function entry point might be better
18540   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18541                                    DAG.getNode(X86ISD::GlobalBaseReg,
18542                                                SDLoc(), PtrVT), InGlue);
18543   InGlue = Chain.getValue(1);
18544 
18545   return GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX, X86II::MO_TLSGD);
18546 }
18547 
18548 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
18549 static SDValue
18550 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18551                                 const EVT PtrVT) {
18552   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18553                     X86::RAX, X86II::MO_TLSGD);
18554 }
18555 
18556 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
18557 static SDValue
18558 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18559                                  const EVT PtrVT) {
18560   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18561                     X86::EAX, X86II::MO_TLSGD);
18562 }
18563 
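// Lower ISD::GlobalTLSAddress using the "local dynamic" model: one
// TLSBASEADDR call computes the module's TLS base, and each variable then
// adds its x@dtpoff offset. Roughly, on 64-bit ELF this ends up as
//   leaq  x@tlsld(%rip), %rdi
//   callq __tls_get_addr@PLT
//   leaq  x@dtpoff(%rax), %reg
// (a sketch; the exact sequence depends on the target and register
// allocation).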
18564 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18565                                            SelectionDAG &DAG, const EVT PtrVT,
18566                                            bool Is64Bit, bool Is64BitLP64) {
18567   SDLoc dl(GA);
18568 
18569   // Get the start address of the TLS block for this module.
18570   X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18571       .getInfo<X86MachineFunctionInfo>();
18572   MFI->incNumLocalDynamicTLSAccesses();
18573 
18574   SDValue Base;
18575   if (Is64Bit) {
18576     unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
18577     Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
18578                       X86II::MO_TLSLD, /*LocalDynamic=*/true);
18579   } else {
18580     SDValue InGlue;
18581     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18582         DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InGlue);
18583     InGlue = Chain.getValue(1);
18584     Base = GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX,
18585                       X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18586   }
18587 
18588   // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18589   // of Base.
18590 
18591   // Build x@dtpoff.
18592   unsigned char OperandFlags = X86II::MO_DTPOFF;
18593   unsigned WrapperKind = X86ISD::Wrapper;
18594   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18595                                            GA->getValueType(0),
18596                                            GA->getOffset(), OperandFlags);
18597   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18598 
18599   // Add x@dtpoff with the base.
18600   return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18601 }
18602 
18603 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
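// Roughly, on 64-bit ELF these end up as
//   local exec:    movq %fs:0, %rax
//                  leaq x@tpoff(%rax), %rax
//   initial exec:  movq %fs:0, %rax
//                  addq x@gottpoff(%rip), %rax
// (a sketch; 32-bit uses %gs and the @ntpoff/@gotntpoff forms shown below).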
18604 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18605                                    const EVT PtrVT, TLSModel::Model model,
18606                                    bool is64Bit, bool isPIC) {
18607   SDLoc dl(GA);
18608 
18609   // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18610   Value *Ptr = Constant::getNullValue(
18611       PointerType::get(*DAG.getContext(), is64Bit ? 257 : 256));
18612 
18613   SDValue ThreadPointer =
18614       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18615                   MachinePointerInfo(Ptr));
18616 
18617   unsigned char OperandFlags = 0;
18618   // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
18619   // initialexec.
18620   unsigned WrapperKind = X86ISD::Wrapper;
18621   if (model == TLSModel::LocalExec) {
18622     OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18623   } else if (model == TLSModel::InitialExec) {
18624     if (is64Bit) {
18625       OperandFlags = X86II::MO_GOTTPOFF;
18626       WrapperKind = X86ISD::WrapperRIP;
18627     } else {
18628       OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18629     }
18630   } else {
18631     llvm_unreachable("Unexpected model");
18632   }
18633 
18634   // emit "addl x@ntpoff,%eax" (local exec)
18635   // or "addl x@indntpoff,%eax" (initial exec)
18636   // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
18637   SDValue TGA =
18638       DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18639                                  GA->getOffset(), OperandFlags);
18640   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18641 
18642   if (model == TLSModel::InitialExec) {
18643     if (isPIC && !is64Bit) {
18644       Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18645                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18646                            Offset);
18647     }
18648 
18649     Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18650                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18651   }
18652 
18653   // The address of the thread local variable is the add of the thread
18654   // pointer with the offset of the variable.
18655   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18656 }
18657 
18658 SDValue
18659 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18660 
18661   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18662 
18663   if (DAG.getTarget().useEmulatedTLS())
18664     return LowerToTLSEmulatedModel(GA, DAG);
18665 
18666   const GlobalValue *GV = GA->getGlobal();
18667   auto PtrVT = getPointerTy(DAG.getDataLayout());
18668   bool PositionIndependent = isPositionIndependent();
18669 
18670   if (Subtarget.isTargetELF()) {
18671     TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18672     switch (model) {
18673       case TLSModel::GeneralDynamic:
18674         if (Subtarget.is64Bit()) {
18675           if (Subtarget.isTarget64BitLP64())
18676             return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18677           return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
18678         }
18679         return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18680       case TLSModel::LocalDynamic:
18681         return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
18682                                            Subtarget.isTarget64BitLP64());
18683       case TLSModel::InitialExec:
18684       case TLSModel::LocalExec:
18685         return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18686                                    PositionIndependent);
18687     }
18688     llvm_unreachable("Unknown TLS model.");
18689   }
18690 
18691   if (Subtarget.isTargetDarwin()) {
18692     // Darwin only has one model of TLS.  Lower to that.
18693     unsigned char OpFlag = 0;
18694     unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18695                            X86ISD::WrapperRIP : X86ISD::Wrapper;
18696 
18697     // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18698     // global base reg.
18699     bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18700     if (PIC32)
18701       OpFlag = X86II::MO_TLVP_PIC_BASE;
18702     else
18703       OpFlag = X86II::MO_TLVP;
18704     SDLoc DL(Op);
18705     SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18706                                                 GA->getValueType(0),
18707                                                 GA->getOffset(), OpFlag);
18708     SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18709 
18710     // With PIC32, the address is actually $g + Offset.
18711     if (PIC32)
18712       Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18713                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18714                            Offset);
18715 
18716     // Lowering the machine isd will make sure everything is in the right
18717     // location.
18718     SDValue Chain = DAG.getEntryNode();
18719     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18720     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18721     SDValue Args[] = { Chain, Offset };
18722     Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18723     Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);
18724 
18725     // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
18726     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18727     MFI.setAdjustsStack(true);
18728 
18729     // And our return value (tls address) is in the standard call return value
18730     // location.
18731     unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18732     return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18733   }
18734 
18735   if (Subtarget.isOSWindows()) {
18736     // Just use the implicit TLS architecture
18737     // Need to generate something similar to:
18738     //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18739     //                                  ; from TEB
18740     //   mov     ecx, dword [rel _tls_index]: Load index (from C runtime)
18741     //   mov     rcx, qword [rdx+rcx*8]
18742     //   mov     eax, .tls$:tlsvar
18743     //   [rax+rcx] contains the address
18744     // Windows 64bit: gs:0x58
18745     // Windows 32bit: fs:__tls_array
18746 
18747     SDLoc dl(GA);
18748     SDValue Chain = DAG.getEntryNode();
18749 
18750     // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18751     // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so use its
18752     // literal value of 0x2C directly.
18753     Value *Ptr = Constant::getNullValue(
18754         Subtarget.is64Bit() ? PointerType::get(*DAG.getContext(), 256)
18755                             : PointerType::get(*DAG.getContext(), 257));
18756 
18757     SDValue TlsArray = Subtarget.is64Bit()
18758                            ? DAG.getIntPtrConstant(0x58, dl)
18759                            : (Subtarget.isTargetWindowsGNU()
18760                                   ? DAG.getIntPtrConstant(0x2C, dl)
18761                                   : DAG.getExternalSymbol("_tls_array", PtrVT));
18762 
18763     SDValue ThreadPointer =
18764         DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18765 
18766     SDValue res;
18767     if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18768       res = ThreadPointer;
18769     } else {
18770       // Load the _tls_index variable
18771       SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18772       if (Subtarget.is64Bit())
18773         IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18774                              MachinePointerInfo(), MVT::i32);
18775       else
18776         IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18777 
18778       const DataLayout &DL = DAG.getDataLayout();
18779       SDValue Scale =
18780           DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18781       IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18782 
18783       res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18784     }
18785 
18786     res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18787 
18788     // Get the offset of start of .tls section
18789     SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18790                                              GA->getValueType(0),
18791                                              GA->getOffset(), X86II::MO_SECREL);
18792     SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18793 
18794     // The address of the thread local variable is the add of the thread
18795     // pointer with the offset of the variable.
18796     return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18797   }
18798 
18799   llvm_unreachable("TLS not implemented for this target.");
18800 }
18801 
18802 /// Lower SRA_PARTS and friends, which return two i32 values
18803 /// and take a 2 x i32 value to shift plus a shift amount.
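/// For example, an i64 shift on a 32-bit target is expanded into operations
/// on the two i32 halves; after isel this often becomes a shld/shrd pair plus
/// a test and conditional move (or branch) for shift amounts of 32 or more
/// (a sketch; the exact sequence comes from the generic expandShiftParts).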
18804 /// TODO: Can this be moved to general expansion code?
18805 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18806   SDValue Lo, Hi;
18807   DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
18808   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
18809 }
18810 
18811 // Try to use a packed vector operation to handle i64 on 32-bit targets when
18812 // AVX512DQ is enabled.
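// For example, an (f64 sint_to_fp i64:%x) with AVX512DQ but no VLX is handled
// by placing %x into a v8i64, converting the whole vector (vcvtqq2pd), and
// extracting element 0 of the result.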
18813 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18814                                         const X86Subtarget &Subtarget) {
18815   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18816           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18817           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18818           Op.getOpcode() == ISD::UINT_TO_FP) &&
18819          "Unexpected opcode!");
18820   bool IsStrict = Op->isStrictFPOpcode();
18821   unsigned OpNo = IsStrict ? 1 : 0;
18822   SDValue Src = Op.getOperand(OpNo);
18823   MVT SrcVT = Src.getSimpleValueType();
18824   MVT VT = Op.getSimpleValueType();
18825 
18826   if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18827       (VT != MVT::f32 && VT != MVT::f64))
18828     return SDValue();
18829 
18830   // Pack the i64 into a vector, do the operation and extract.
18831 
18832   // Using 256-bit vectors to ensure the result is 128 bits for the f32 case.
18833   unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18834   MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18835   MVT VecVT = MVT::getVectorVT(VT, NumElts);
18836 
18837   SDLoc dl(Op);
18838   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18839   if (IsStrict) {
18840     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18841                                  {Op.getOperand(0), InVec});
18842     SDValue Chain = CvtVec.getValue(1);
18843     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18844                                 DAG.getIntPtrConstant(0, dl));
18845     return DAG.getMergeValues({Value, Chain}, dl);
18846   }
18847 
18848   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18849 
18850   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18851                      DAG.getIntPtrConstant(0, dl));
18852 }
18853 
18854 // Try to use a packed vector operation to handle i64 on 32-bit targets.
18855 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
18856                                  const X86Subtarget &Subtarget) {
18857   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18858           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18859           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18860           Op.getOpcode() == ISD::UINT_TO_FP) &&
18861          "Unexpected opcode!");
18862   bool IsStrict = Op->isStrictFPOpcode();
18863   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
18864   MVT SrcVT = Src.getSimpleValueType();
18865   MVT VT = Op.getSimpleValueType();
18866 
18867   if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
18868     return SDValue();
18869 
18870   // Pack the i64 into a vector, do the operation and extract.
18871 
18872   assert(Subtarget.hasFP16() && "Expected FP16");
18873 
18874   SDLoc dl(Op);
18875   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
18876   if (IsStrict) {
18877     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
18878                                  {Op.getOperand(0), InVec});
18879     SDValue Chain = CvtVec.getValue(1);
18880     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18881                                 DAG.getIntPtrConstant(0, dl));
18882     return DAG.getMergeValues({Value, Chain}, dl);
18883   }
18884 
18885   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
18886 
18887   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18888                      DAG.getIntPtrConstant(0, dl));
18889 }
18890 
18891 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18892                           const X86Subtarget &Subtarget) {
18893   switch (Opcode) {
18894     case ISD::SINT_TO_FP:
18895       // TODO: Handle wider types with AVX/AVX512.
18896       if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18897         return false;
18898       // CVTDQ2PS or (V)CVTDQ2PD
18899       return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18900 
18901     case ISD::UINT_TO_FP:
18902       // TODO: Handle wider types and i64 elements.
18903       if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18904         return false;
18905       // VCVTUDQ2PS or VCVTUDQ2PD
18906       return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18907 
18908     default:
18909       return false;
18910   }
18911 }
18912 
18913 /// Given a scalar cast operation that is extracted from a vector, try to
18914 /// vectorize the cast op followed by extraction. This will avoid an expensive
18915 /// round-trip between XMM and GPR.
18916 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18917                                       const X86Subtarget &Subtarget) {
18918   // TODO: This could be enhanced to handle smaller integer types by peeking
18919   // through an extend.
18920   SDValue Extract = Cast.getOperand(0);
18921   MVT DestVT = Cast.getSimpleValueType();
18922   if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18923       !isa<ConstantSDNode>(Extract.getOperand(1)))
18924     return SDValue();
18925 
18926   // See if we have a 128-bit vector cast op for this type of cast.
18927   SDValue VecOp = Extract.getOperand(0);
18928   MVT FromVT = VecOp.getSimpleValueType();
18929   unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18930   MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18931   MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18932   if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18933     return SDValue();
18934 
18935   // If we are extracting from a non-zero element, first shuffle the source
18936   // vector to allow extracting from element zero.
18937   SDLoc DL(Cast);
18938   if (!isNullConstant(Extract.getOperand(1))) {
18939     SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18940     Mask[0] = Extract.getConstantOperandVal(1);
18941     VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18942   }
18943   // If the source vector is wider than 128-bits, extract the low part. Do not
18944   // create an unnecessarily wide vector cast op.
18945   if (FromVT != Vec128VT)
18946     VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18947 
18948   // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18949   // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18950   SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18951   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18952                      DAG.getIntPtrConstant(0, DL));
18953 }
18954 
18955 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
18956 /// try to vectorize the cast ops. This will avoid an expensive round-trip
18957 /// between XMM and GPR.
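/// For example, (float)(int)x for a scalar float becomes cvttps2dq followed
/// by cvtdq2ps on the low vector element instead of bouncing the intermediate
/// i32 through a GPR.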
18958 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
18959                                 const X86Subtarget &Subtarget) {
18960   // TODO: Allow FP_TO_UINT.
18961   SDValue CastToInt = CastToFP.getOperand(0);
18962   MVT VT = CastToFP.getSimpleValueType();
18963   if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
18964     return SDValue();
18965 
18966   MVT IntVT = CastToInt.getSimpleValueType();
18967   SDValue X = CastToInt.getOperand(0);
18968   MVT SrcVT = X.getSimpleValueType();
18969   if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
18970     return SDValue();
18971 
18972   // See if we have 128-bit vector cast instructions for this type of cast.
18973   // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
18974   if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
18975       IntVT != MVT::i32)
18976     return SDValue();
18977 
18978   unsigned SrcSize = SrcVT.getSizeInBits();
18979   unsigned IntSize = IntVT.getSizeInBits();
18980   unsigned VTSize = VT.getSizeInBits();
18981   MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
18982   MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
18983   MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
18984 
18985   // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
18986   unsigned ToIntOpcode =
18987       SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
18988   unsigned ToFPOpcode =
18989       IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
18990 
18991   // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
18992   //
18993   // We are not defining the high elements (for example, zero them) because
18994   // that could nullify any performance advantage that we hoped to gain from
18995   // this vector op hack. We do not expect any adverse effects (like denorm
18996   // penalties) with cast ops.
18997   SDLoc DL(CastToFP);
18998   SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
18999   SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
19000   SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
19001   SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
19002   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
19003 }
19004 
19005 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
19006                                     const X86Subtarget &Subtarget) {
19007   SDLoc DL(Op);
19008   bool IsStrict = Op->isStrictFPOpcode();
19009   MVT VT = Op->getSimpleValueType(0);
19010   SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
19011 
19012   if (Subtarget.hasDQI()) {
19013     assert(!Subtarget.hasVLX() && "Unexpected features");
19014 
19015     assert((Src.getSimpleValueType() == MVT::v2i64 ||
19016             Src.getSimpleValueType() == MVT::v4i64) &&
19017            "Unsupported custom type");
19018 
19019     // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
19020     assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
19021            "Unexpected VT!");
19022     MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
19023 
19024     // Need to concat with zero vector for strict fp to avoid spurious
19025     // exceptions.
19026     SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
19027                            : DAG.getUNDEF(MVT::v8i64);
19028     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
19029                       DAG.getIntPtrConstant(0, DL));
19030     SDValue Res, Chain;
19031     if (IsStrict) {
19032       Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
19033                         {Op->getOperand(0), Src});
19034       Chain = Res.getValue(1);
19035     } else {
19036       Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
19037     }
19038 
19039     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19040                       DAG.getIntPtrConstant(0, DL));
19041 
19042     if (IsStrict)
19043       return DAG.getMergeValues({Res, Chain}, DL);
19044     return Res;
19045   }
19046 
19047   bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
19048                   Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
19049   if (VT != MVT::v4f32 || IsSigned)
19050     return SDValue();
19051 
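  // Unsigned v4i64 -> v4f32 without AVX512DQ: elements with the sign bit set
  // cannot be converted directly as signed values. For those, halve the input
  // while keeping the shifted-out low bit OR'ed in (so the final result still
  // rounds correctly), convert each element with a scalar signed conversion,
  // double the result with an FADD, and select between the direct and the
  // doubled conversion per element.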
19052   SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
19053   SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
19054   SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
19055                              DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
19056                              DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
19057   SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
19058   SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
19059   SmallVector<SDValue, 4> SignCvts(4);
19060   SmallVector<SDValue, 4> Chains(4);
19061   for (int i = 0; i != 4; ++i) {
19062     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
19063                               DAG.getIntPtrConstant(i, DL));
19064     if (IsStrict) {
19065       SignCvts[i] =
19066           DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
19067                       {Op.getOperand(0), Elt});
19068       Chains[i] = SignCvts[i].getValue(1);
19069     } else {
19070       SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
19071     }
19072   }
19073   SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
19074 
19075   SDValue Slow, Chain;
19076   if (IsStrict) {
19077     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
19078     Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
19079                        {Chain, SignCvt, SignCvt});
19080     Chain = Slow.getValue(1);
19081   } else {
19082     Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
19083   }
19084 
19085   IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
19086   SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
19087 
19088   if (IsStrict)
19089     return DAG.getMergeValues({Cvt, Chain}, DL);
19090 
19091   return Cvt;
19092 }
19093 
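// Perform an int-to-fp conversion at f32 (or the matching vector of f32) and
// round the result back down with FP_ROUND. The callers use this for result
// types that are handled as soft FP (see their isSoftF16 checks).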
19094 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
19095   bool IsStrict = Op->isStrictFPOpcode();
19096   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
19097   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19098   MVT VT = Op.getSimpleValueType();
19099   MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
19100   SDLoc dl(Op);
19101 
19102   SDValue Rnd = DAG.getIntPtrConstant(0, dl);
19103   if (IsStrict)
19104     return DAG.getNode(
19105         ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
19106         {Chain,
19107          DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
19108          Rnd});
19109   return DAG.getNode(ISD::FP_ROUND, dl, VT,
19110                      DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
19111 }
19112 
19113 static bool isLegalConversion(MVT VT, bool IsSigned,
19114                               const X86Subtarget &Subtarget) {
19115   if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
19116     return true;
19117   if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
19118     return true;
19119   if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
19120     return true;
19121   if (Subtarget.useAVX512Regs()) {
19122     if (VT == MVT::v16i32)
19123       return true;
19124     if (VT == MVT::v8i64 && Subtarget.hasDQI())
19125       return true;
19126   }
19127   if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
19128       (VT == MVT::v2i64 || VT == MVT::v4i64))
19129     return true;
19130   return false;
19131 }
19132 
19133 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
19134                                            SelectionDAG &DAG) const {
19135   bool IsStrict = Op->isStrictFPOpcode();
19136   unsigned OpNo = IsStrict ? 1 : 0;
19137   SDValue Src = Op.getOperand(OpNo);
19138   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19139   MVT SrcVT = Src.getSimpleValueType();
19140   MVT VT = Op.getSimpleValueType();
19141   SDLoc dl(Op);
19142 
19143   if (isSoftF16(VT, Subtarget))
19144     return promoteXINT_TO_FP(Op, DAG);
19145   else if (isLegalConversion(SrcVT, true, Subtarget))
19146     return Op;
19147 
19148   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19149     return LowerWin64_INT128_TO_FP(Op, DAG);
19150 
19151   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19152     return Extract;
19153 
19154   if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
19155     return R;
19156 
19157   if (SrcVT.isVector()) {
19158     if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
19159       // Note: Since v2f64 is a legal type, we don't need to zero extend the
19160       // source for strict FP.
19161       if (IsStrict)
19162         return DAG.getNode(
19163             X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
19164             {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19165                                 DAG.getUNDEF(SrcVT))});
19166       return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
19167                          DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19168                                      DAG.getUNDEF(SrcVT)));
19169     }
19170     if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
19171       return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19172 
19173     return SDValue();
19174   }
19175 
19176   assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
19177          "Unknown SINT_TO_FP to lower!");
19178 
19179   bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
19180 
19181   // These are really Legal; return the operand so the caller accepts it as
19182   // Legal.
19183   if (SrcVT == MVT::i32 && UseSSEReg)
19184     return Op;
19185   if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
19186     return Op;
19187 
19188   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19189     return V;
19190   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19191     return V;
19192 
19193   // SSE doesn't have an i16 conversion so we need to promote.
19194   if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
19195     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
19196     if (IsStrict)
19197       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
19198                          {Chain, Ext});
19199 
19200     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
19201   }
19202 
19203   if (VT == MVT::f128 || !Subtarget.hasX87())
19204     return SDValue();
19205 
19206   SDValue ValueToStore = Src;
19207   if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
19208     // Bitcasting to f64 here allows us to do a single 64-bit store from
19209     // an SSE register, avoiding the store forwarding penalty that would come
19210     // with two 32-bit stores.
19211     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19212 
19213   unsigned Size = SrcVT.getStoreSize();
19214   Align Alignment(Size);
19215   MachineFunction &MF = DAG.getMachineFunction();
19216   auto PtrVT = getPointerTy(MF.getDataLayout());
19217   int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
19218   MachinePointerInfo MPI =
19219       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19220   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19221   Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
19222   std::pair<SDValue, SDValue> Tmp =
19223       BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
19224 
19225   if (IsStrict)
19226     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19227 
19228   return Tmp.first;
19229 }
19230 
19231 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
19232     EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
19233     MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
19234   // Build the FILD
19235   SDVTList Tys;
19236   bool useSSE = isScalarFPTypeInSSEReg(DstVT);
19237   if (useSSE)
19238     Tys = DAG.getVTList(MVT::f80, MVT::Other);
19239   else
19240     Tys = DAG.getVTList(DstVT, MVT::Other);
19241 
19242   SDValue FILDOps[] = {Chain, Pointer};
19243   SDValue Result =
19244       DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
19245                               Alignment, MachineMemOperand::MOLoad);
19246   Chain = Result.getValue(1);
19247 
19248   if (useSSE) {
19249     MachineFunction &MF = DAG.getMachineFunction();
19250     unsigned SSFISize = DstVT.getStoreSize();
19251     int SSFI =
19252         MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
19253     auto PtrVT = getPointerTy(MF.getDataLayout());
19254     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19255     Tys = DAG.getVTList(MVT::Other);
19256     SDValue FSTOps[] = {Chain, Result, StackSlot};
19257     MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
19258         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19259         MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
19260 
19261     Chain =
19262         DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
19263     Result = DAG.getLoad(
19264         DstVT, DL, Chain, StackSlot,
19265         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
19266     Chain = Result.getValue(1);
19267   }
19268 
19269   return { Result, Chain };
19270 }
19271 
19272 /// Horizontal vector math instructions may be slower than normal math with
19273 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19274 /// implementation, and likely shuffle complexity of the alternate sequence.
19275 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19276                                   const X86Subtarget &Subtarget) {
19277   bool IsOptimizingSize = DAG.shouldOptForSize();
19278   bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19279   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19280 }
19281 
19282 /// 64-bit unsigned integer to double expansion.
19283 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
19284                                    const X86Subtarget &Subtarget) {
19285   // We can't use this algorithm for strict fp. It produces -0.0 instead of +0.0
19286   // when converting 0 while rounding toward negative infinity. For strict FP the
19287   // caller falls back to Expand, or uses FILD in 32-bit mode, instead.
19288   assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
19289   // This algorithm is not obvious. Here is what we're trying to output:
19290   /*
19291      movq       %rax,  %xmm0
19292      punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
19293      subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
19294      #ifdef __SSE3__
19295        haddpd   %xmm0, %xmm0
19296      #else
19297        pshufd   $0x4e, %xmm0, %xmm1
19298        addpd    %xmm1, %xmm0
19299      #endif
19300   */
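  // Why this works, briefly: punpckldq interleaves the two 32-bit halves of
  // the input with the exponent patterns above, producing the doubles
  // (2^52 + lo) and (2^84 + hi * 2^32), both exact. Subtracting { 2^52, 2^84 }
  // leaves exactly { lo, hi * 2^32 }, and the final add yields
  // lo + hi * 2^32 == the original value, rounded only once.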
19301 
19302   SDLoc dl(Op);
19303   LLVMContext *Context = DAG.getContext();
19304 
19305   // Build some magic constants.
19306   static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
19307   Constant *C0 = ConstantDataVector::get(*Context, CV0);
19308   auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19309   SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
19310 
19311   SmallVector<Constant*,2> CV1;
19312   CV1.push_back(
19313     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19314                                       APInt(64, 0x4330000000000000ULL))));
19315   CV1.push_back(
19316     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19317                                       APInt(64, 0x4530000000000000ULL))));
19318   Constant *C1 = ConstantVector::get(CV1);
19319   SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
19320 
19321   // Load the 64-bit value into an XMM register.
19322   SDValue XR1 =
19323       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
19324   SDValue CLod0 = DAG.getLoad(
19325       MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
19326       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19327   SDValue Unpck1 =
19328       getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
19329 
19330   SDValue CLod1 = DAG.getLoad(
19331       MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
19332       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19333   SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
19334   // TODO: Are there any fast-math-flags to propagate here?
19335   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
19336   SDValue Result;
19337 
19338   if (Subtarget.hasSSE3() &&
19339       shouldUseHorizontalOp(true, DAG, Subtarget)) {
19340     Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
19341   } else {
19342     SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
19343     Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19344   }
19345   Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19346                        DAG.getIntPtrConstant(0, dl));
19347   return Result;
19348 }
19349 
19350 /// 32-bit unsigned integer to float expansion.
19351 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19352                                    const X86Subtarget &Subtarget) {
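  // This is the single-step analogue of the i64 expansion above; a sketch of
  // the idea (not the exact node sequence):
  //
  //   bits = (uint64_t)u32 | 0x4330000000000000   // the double 2^52 + u32
  //   res  = bits_as_double - 0x1.0p52            // == (double)u32
  //
  // OR'ing the zero-extended u32 into the mantissa of 2^52 is exact because
  // u32 < 2^52, so the final subtraction yields exactly (double)u32.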
19353   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19354   SDLoc dl(Op);
19355   // FP constant to bias correct the final result.
19356   SDValue Bias = DAG.getConstantFP(
19357       llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::f64);
19358 
19359   // Load the 32-bit value into an XMM register.
19360   SDValue Load =
19361       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19362 
19363   // Zero out the upper parts of the register.
19364   Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19365 
19366   // Or the load with the bias.
19367   SDValue Or = DAG.getNode(
19368       ISD::OR, dl, MVT::v2i64,
19369       DAG.getBitcast(MVT::v2i64, Load),
19370       DAG.getBitcast(MVT::v2i64,
19371                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19372   Or =
19373       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19374                   DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19375 
19376   if (Op.getNode()->isStrictFPOpcode()) {
19377     // Subtract the bias.
19378     // TODO: Are there any fast-math-flags to propagate here?
19379     SDValue Chain = Op.getOperand(0);
19380     SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19381                               {Chain, Or, Bias});
19382 
19383     if (Op.getValueType() == Sub.getValueType())
19384       return Sub;
19385 
19386     // Handle final rounding.
19387     std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19388         Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19389 
19390     return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19391   }
19392 
19393   // Subtract the bias.
19394   // TODO: Are there any fast-math-flags to propagate here?
19395   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19396 
19397   // Handle final rounding.
19398   return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19399 }
19400 
19401 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19402                                      const X86Subtarget &Subtarget,
19403                                      const SDLoc &DL) {
19404   if (Op.getSimpleValueType() != MVT::v2f64)
19405     return SDValue();
19406 
19407   bool IsStrict = Op->isStrictFPOpcode();
19408 
19409   SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19410   assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19411 
19412   if (Subtarget.hasAVX512()) {
19413     if (!Subtarget.hasVLX()) {
19414       // Let generic type legalization widen this.
19415       if (!IsStrict)
19416         return SDValue();
19417       // Otherwise pad the integer input with 0s and widen the operation.
19418       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19419                        DAG.getConstant(0, DL, MVT::v2i32));
19420       SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19421                                 {Op.getOperand(0), N0});
19422       SDValue Chain = Res.getValue(1);
19423       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19424                         DAG.getIntPtrConstant(0, DL));
19425       return DAG.getMergeValues({Res, Chain}, DL);
19426     }
19427 
19428     // Legalize to v4i32 type.
19429     N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19430                      DAG.getUNDEF(MVT::v2i32));
19431     if (IsStrict)
19432       return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19433                          {Op.getOperand(0), N0});
19434     return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19435   }
19436 
  // Zero extend to v2i64, then OR with the floating point representation of
  // 2^52. This gives us the floating point equivalent of 2^52 + the i32
  // integer, since double has 52 bits of mantissa. Then subtract 2^52 in
  // floating point, leaving just our i32 integers in double format.
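  // E.g. for a lane holding 7 (a bit-level sketch):
  //   zext(7)          = 0x0000000000000007
  //   | bits(0x1.0p52) = 0x4330000000000007   // the double 2^52 + 7
  //   - 0x1.0p52       = 7.0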
19441   SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19442   SDValue VBias = DAG.getConstantFP(
19443       llvm::bit_cast<double>(0x4330000000000000ULL), DL, MVT::v2f64);
19444   SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19445                            DAG.getBitcast(MVT::v2i64, VBias));
19446   Or = DAG.getBitcast(MVT::v2f64, Or);
19447 
19448   if (IsStrict)
19449     return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19450                        {Op.getOperand(0), Or, VBias});
19451   return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19452 }
19453 
19454 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19455                                      const X86Subtarget &Subtarget) {
19456   SDLoc DL(Op);
19457   bool IsStrict = Op->isStrictFPOpcode();
19458   SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19459   MVT VecIntVT = V.getSimpleValueType();
19460   assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19461          "Unsupported custom type");
19462 
19463   if (Subtarget.hasAVX512()) {
    // With AVX512 but not VLX, we need to widen to get a 512-bit result type.
19465     assert(!Subtarget.hasVLX() && "Unexpected features");
19466     MVT VT = Op->getSimpleValueType(0);
19467 
19468     // v8i32->v8f64 is legal with AVX512 so just return it.
19469     if (VT == MVT::v8f64)
19470       return Op;
19471 
19472     assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19473            "Unexpected VT!");
19474     MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19475     MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19476     // Need to concat with zero vector for strict fp to avoid spurious
19477     // exceptions.
19478     SDValue Tmp =
19479         IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19480     V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19481                     DAG.getIntPtrConstant(0, DL));
19482     SDValue Res, Chain;
19483     if (IsStrict) {
19484       Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19485                         {Op->getOperand(0), V});
19486       Chain = Res.getValue(1);
19487     } else {
19488       Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19489     }
19490 
19491     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19492                       DAG.getIntPtrConstant(0, DL));
19493 
19494     if (IsStrict)
19495       return DAG.getMergeValues({Res, Chain}, DL);
19496     return Res;
19497   }
19498 
19499   if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19500       Op->getSimpleValueType(0) == MVT::v4f64) {
19501     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19502     Constant *Bias = ConstantFP::get(
19503         *DAG.getContext(),
19504         APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19505     auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19506     SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
19507     SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19508     SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19509     SDValue VBias = DAG.getMemIntrinsicNode(
19510         X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19511         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
19512         MachineMemOperand::MOLoad);
19513 
19514     SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19515                              DAG.getBitcast(MVT::v4i64, VBias));
19516     Or = DAG.getBitcast(MVT::v4f64, Or);
19517 
19518     if (IsStrict)
19519       return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19520                          {Op.getOperand(0), Or, VBias});
19521     return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19522   }
19523 
19524   // The algorithm is the following:
19525   // #ifdef __SSE4_1__
19526   //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19527   //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19528   //                                 (uint4) 0x53000000, 0xaa);
19529   // #else
19530   //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19531   //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19532   // #endif
19533   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19534   //     return (float4) lo + fhi;
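  //
  // Why this works (a rough sketch): 0x4b000000 is the bit pattern of
  // 0x1.0p23f and 0x53000000 of 0x1.0p39f. OR'ing the low 16 bits of v into
  // the mantissa of 2^23 yields the float 2^23 + (v & 0xffff), and OR'ing
  // (v >> 16) into the mantissa of 2^39 yields 2^39 + (v >> 16) * 2^16.
  // Subtracting (2^39 + 2^23) from hi and then adding lo reassembles v.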
19535 
19536   bool Is128 = VecIntVT == MVT::v4i32;
19537   MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
19540   if (VecFloatVT != Op->getSimpleValueType(0))
19541     return SDValue();
19542 
  // In the #ifdef/#else code, we have in common:
19544   // - The vector of constants:
19545   // -- 0x4b000000
19546   // -- 0x53000000
19547   // - A shift:
19548   // -- v >> 16
19549 
19550   // Create the splat vector for 0x4b000000.
19551   SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19552   // Create the splat vector for 0x53000000.
19553   SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19554 
19555   // Create the right shift.
19556   SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19557   SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19558 
19559   SDValue Low, High;
19560   if (Subtarget.hasSSE41()) {
19561     MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19562     //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19563     SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19564     SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19565     // Low will be bitcasted right away, so do not bother bitcasting back to its
19566     // original type.
19567     Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19568                       VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19569     //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19570     //                                 (uint4) 0x53000000, 0xaa);
19571     SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19572     SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19573     // High will be bitcasted right away, so do not bother bitcasting back to
19574     // its original type.
19575     High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19576                        VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19577   } else {
19578     SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19579     //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19580     SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19581     Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19582 
19583     //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19584     High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19585   }
19586 
19587   // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19588   SDValue VecCstFSub = DAG.getConstantFP(
19589       APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
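  // (0x53000080 is exactly 0x1.0p39f + 0x1.0p23f: the mantissa bit 0x80
  //  contributes 2^23 on top of the 2^39 encoded by 0x53000000.)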
19590 
19591   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19592   // NOTE: By using fsub of a positive constant instead of fadd of a negative
19593   // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19594   // enabled. See PR24512.
19595   SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19596   // TODO: Are there any fast-math-flags to propagate here?
19597   //     (float4) lo;
19598   SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19599   //     return (float4) lo + fhi;
19600   if (IsStrict) {
19601     SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19602                                 {Op.getOperand(0), HighBitcast, VecCstFSub});
19603     return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19604                        {FHigh.getValue(1), LowBitcast, FHigh});
19605   }
19606 
19607   SDValue FHigh =
19608       DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19609   return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19610 }
19611 
19612 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19613                                    const X86Subtarget &Subtarget) {
19614   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19615   SDValue N0 = Op.getOperand(OpNo);
19616   MVT SrcVT = N0.getSimpleValueType();
19617   SDLoc dl(Op);
19618 
19619   switch (SrcVT.SimpleTy) {
19620   default:
19621     llvm_unreachable("Custom UINT_TO_FP is not supported!");
19622   case MVT::v2i32:
19623     return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19624   case MVT::v4i32:
19625   case MVT::v8i32:
19626     return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19627   case MVT::v2i64:
19628   case MVT::v4i64:
19629     return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19630   }
19631 }
19632 
19633 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19634                                            SelectionDAG &DAG) const {
19635   bool IsStrict = Op->isStrictFPOpcode();
19636   unsigned OpNo = IsStrict ? 1 : 0;
19637   SDValue Src = Op.getOperand(OpNo);
19638   SDLoc dl(Op);
19639   auto PtrVT = getPointerTy(DAG.getDataLayout());
19640   MVT SrcVT = Src.getSimpleValueType();
19641   MVT DstVT = Op->getSimpleValueType(0);
19642   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19643 
19644   // Bail out when we don't have native conversion instructions.
19645   if (DstVT == MVT::f128)
19646     return SDValue();
19647 
19648   if (isSoftF16(DstVT, Subtarget))
19649     return promoteXINT_TO_FP(Op, DAG);
19650   else if (isLegalConversion(SrcVT, false, Subtarget))
19651     return Op;
19652 
19653   if (DstVT.isVector())
19654     return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19655 
19656   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19657     return LowerWin64_INT128_TO_FP(Op, DAG);
19658 
19659   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19660     return Extract;
19661 
19662   if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19663       (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19664     // Conversions from unsigned i32 to f32/f64 are legal,
19665     // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
19666     return Op;
19667   }
19668 
19669   // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19670   if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19671     Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19672     if (IsStrict)
19673       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19674                          {Chain, Src});
19675     return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19676   }
19677 
19678   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19679     return V;
19680   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19681     return V;
19682 
19683   // The transform for i64->f64 isn't correct for 0 when rounding to negative
19684   // infinity. It produces -0.0, so disable under strictfp.
19685   if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
19686       !IsStrict)
19687     return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19688   // The transform for i32->f64/f32 isn't correct for 0 when rounding to
  // negative infinity, so disable it under strictfp and use FILD instead.
19690   if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
19691       !IsStrict)
19692     return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19693   if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
19694       (DstVT == MVT::f32 || DstVT == MVT::f64))
19695     return SDValue();
19696 
19697   // Make a 64-bit buffer, and use it to build an FILD.
19698   SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
19699   int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19700   Align SlotAlign(8);
19701   MachinePointerInfo MPI =
19702     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19703   if (SrcVT == MVT::i32) {
19704     SDValue OffsetSlot =
19705         DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
19706     SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
19707     SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19708                                   OffsetSlot, MPI.getWithOffset(4), SlotAlign);
19709     std::pair<SDValue, SDValue> Tmp =
19710         BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
19711     if (IsStrict)
19712       return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19713 
19714     return Tmp.first;
19715   }
19716 
19717   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19718   SDValue ValueToStore = Src;
19719   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19720     // Bitcasting to f64 here allows us to do a single 64-bit store from
19721     // an SSE register, avoiding the store forwarding penalty that would come
19722     // with two 32-bit stores.
19723     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19724   }
19725   SDValue Store =
19726       DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
19727   // For i64 source, we need to add the appropriate power of 2 if the input
19728   // was negative. We must be careful to do the computation in x87 extended
19729   // precision, not in SSE.
19730   SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19731   SDValue Ops[] = { Store, StackSlot };
19732   SDValue Fild =
19733       DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
19734                               SlotAlign, MachineMemOperand::MOLoad);
19735   Chain = Fild.getValue(1);
19736 
19738   // Check whether the sign bit is set.
19739   SDValue SignSet = DAG.getSetCC(
19740       dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19741       Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19742 
  // Build a 64-bit pair (FF, 0) in the constant pool, with FF in the hi bits.
19744   APInt FF(64, 0x5F80000000000000ULL);
19745   SDValue FudgePtr = DAG.getConstantPool(
19746       ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19747   Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
19748 
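  // In memory (little-endian) the low 4 bytes of FF hold 0.0f and the high 4
  // bytes hold 0x5F800000, the bit pattern of the float 2^64. FILD above
  // treated an i64 with its sign bit set as (value - 2^64), so adding the
  // 2^64 fudge factor in that case recovers the unsigned value, while adding
  // 0.0f leaves non-negative inputs unchanged.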
19749   // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19750   SDValue Zero = DAG.getIntPtrConstant(0, dl);
19751   SDValue Four = DAG.getIntPtrConstant(4, dl);
19752   SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19753   FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19754 
19755   // Load the value out, extending it from f32 to f80.
19756   SDValue Fudge = DAG.getExtLoad(
19757       ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19758       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19759       CPAlignment);
19760   Chain = Fudge.getValue(1);
19761   // Extend everything to 80 bits to force it to be done on x87.
19762   // TODO: Are there any fast-math-flags to propagate here?
19763   if (IsStrict) {
19764     unsigned Opc = ISD::STRICT_FADD;
    // Windows needs the precision control changed to 80 bits around this add.
19766     if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19767       Opc = X86ISD::STRICT_FP80_ADD;
19768 
19769     SDValue Add =
19770         DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
19771     // STRICT_FP_ROUND can't handle equal types.
19772     if (DstVT == MVT::f80)
19773       return Add;
19774     return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19775                        {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19776   }
19777   unsigned Opc = ISD::FADD;
  // Windows needs the precision control changed to 80 bits around this add.
19779   if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19780     Opc = X86ISD::FP80_ADD;
19781 
19782   SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
19783   return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19784                      DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
19785 }
19786 
19787 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19788 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19789 // just return an SDValue().
19790 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19791 // to i16, i32 or i64, and we lower it to a legal sequence and return the
19792 // result.
19793 SDValue
19794 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19795                                    bool IsSigned, SDValue &Chain) const {
19796   bool IsStrict = Op->isStrictFPOpcode();
19797   SDLoc DL(Op);
19798 
19799   EVT DstTy = Op.getValueType();
19800   SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19801   EVT TheVT = Value.getValueType();
19802   auto PtrVT = getPointerTy(DAG.getDataLayout());
19803 
19804   if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19805     // f16 must be promoted before using the lowering in this routine.
19806     // fp128 does not use this lowering.
19807     return SDValue();
19808   }
19809 
19810   // If using FIST to compute an unsigned i64, we'll need some fixup
19811   // to handle values above the maximum signed i64.  A FIST is always
19812   // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19813   bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19814 
19815   // FIXME: This does not generate an invalid exception if the input does not
19816   // fit in i32. PR44019
19817   if (!IsSigned && DstTy != MVT::i64) {
19818     // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19819     // The low 32 bits of the fist result will have the correct uint32 result.
19820     assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19821     DstTy = MVT::i64;
19822   }
19823 
19824   assert(DstTy.getSimpleVT() <= MVT::i64 &&
19825          DstTy.getSimpleVT() >= MVT::i16 &&
19826          "Unknown FP_TO_INT to lower!");
19827 
19828   // We lower FP->int64 into FISTP64 followed by a load from a temporary
19829   // stack slot.
19830   MachineFunction &MF = DAG.getMachineFunction();
19831   unsigned MemSize = DstTy.getStoreSize();
19832   int SSFI =
19833       MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
19834   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19835 
19836   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19837 
19838   SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
  SDValue Adjust; // 0 or 0x8000000000000000, for result sign bit adjustment.
19840   if (UnsignedFixup) {
19841     //
19842     // Conversion to unsigned i64 is implemented with a select,
19843     // depending on whether the source value fits in the range
19844     // of a signed i64.  Let Thresh be the FP equivalent of
19845     // 0x8000000000000000ULL.
19846     //
    //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;
    //  FltOfs = (Value >= Thresh) ? Thresh : 0;
    //  FistSrc = (Value - FltOfs);
    //  Fist-to-mem64 FistSrc
    //  Add 0 or 0x8000000000000000 to the 64-bit result, which is
    //  equivalent to XOR'ing the result with Adjust.
19853     //
19854     // Being a power of 2, Thresh is exactly representable in all FP formats.
19855     // For X87 we'd like to use the smallest FP type for this constant, but
19856     // for DAG type consistency we have to match the FP operand type.
19857 
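    // 0x5f000000 is the bit pattern of the float 2^63, i.e. the threshold
    // 0x8000000000000000ULL represented exactly in single precision.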
19858     APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19859     LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19860     bool LosesInfo = false;
19861     if (TheVT == MVT::f64)
19862       // The rounding mode is irrelevant as the conversion should be exact.
19863       Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19864                               &LosesInfo);
19865     else if (TheVT == MVT::f80)
19866       Status = Thresh.convert(APFloat::x87DoubleExtended(),
19867                               APFloat::rmNearestTiesToEven, &LosesInfo);
19868 
19869     assert(Status == APFloat::opOK && !LosesInfo &&
19870            "FP conversion should have been exact");
19871 
19872     SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19873 
19874     EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19875                                    *DAG.getContext(), TheVT);
19876     SDValue Cmp;
19877     if (IsStrict) {
19878       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
19879                          /*IsSignaling*/ true);
19880       Chain = Cmp.getValue(1);
19881     } else {
19882       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
19883     }
19884 
19885     // Our preferred lowering of
19886     //
19887     // (Value >= Thresh) ? 0x8000000000000000ULL : 0
19888     //
19889     // is
19890     //
19891     // (Value >= Thresh) << 63
19892     //
19893     // but since we can get here after LegalOperations, DAGCombine might do the
19894     // wrong thing if we create a select. So, directly create the preferred
19895     // version.
19896     SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
19897     SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
19898     Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
19899 
19900     SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
19901                                    DAG.getConstantFP(0.0, DL, TheVT));
19902 
19903     if (IsStrict) {
19904       Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19905                           { Chain, Value, FltOfs });
19906       Chain = Value.getValue(1);
19907     } else
19908       Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19909   }
19910 
19911   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19912 
  // FIXME: This causes a redundant load/store if the SSE-class value is
  // already in memory, such as when it is on the call stack.
19915   if (isScalarFPTypeInSSEReg(TheVT)) {
19916     assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19917     Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19918     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19919     SDValue Ops[] = { Chain, StackSlot };
19920 
19921     unsigned FLDSize = TheVT.getStoreSize();
19922     assert(FLDSize <= MemSize && "Stack slot not big enough");
19923     MachineMemOperand *MMO = MF.getMachineMemOperand(
19924         MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
19925     Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19926     Chain = Value.getValue(1);
19927   }
19928 
19929   // Build the FP_TO_INT*_IN_MEM
19930   MachineMemOperand *MMO = MF.getMachineMemOperand(
19931       MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
19932   SDValue Ops[] = { Chain, Value, StackSlot };
19933   SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19934                                          DAG.getVTList(MVT::Other),
19935                                          Ops, DstTy, MMO);
19936 
19937   SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19938   Chain = Res.getValue(1);
19939 
  // If we need an unsigned fixup, XOR the result with Adjust.
19941   if (UnsignedFixup)
19942     Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19943 
19944   return Res;
19945 }
19946 
19947 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19948                               const X86Subtarget &Subtarget) {
19949   MVT VT = Op.getSimpleValueType();
19950   SDValue In = Op.getOperand(0);
19951   MVT InVT = In.getSimpleValueType();
19952   SDLoc dl(Op);
19953   unsigned Opc = Op.getOpcode();
19954 
19955   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19956   assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19957          "Unexpected extension opcode");
19958   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19959          "Expected same number of elements");
19960   assert((VT.getVectorElementType() == MVT::i16 ||
19961           VT.getVectorElementType() == MVT::i32 ||
19962           VT.getVectorElementType() == MVT::i64) &&
19963          "Unexpected element type");
19964   assert((InVT.getVectorElementType() == MVT::i8 ||
19965           InVT.getVectorElementType() == MVT::i16 ||
19966           InVT.getVectorElementType() == MVT::i32) &&
19967          "Unexpected element type");
19968 
19969   unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);
19970 
19971   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
19972     assert(InVT == MVT::v32i8 && "Unexpected VT!");
19973     return splitVectorIntUnary(Op, DAG);
19974   }
19975 
19976   if (Subtarget.hasInt256())
19977     return Op;
19978 
19979   // Optimize vectors in AVX mode:
19980   //
19981   //   v8i16 -> v8i32
19982   //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
19983   //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
19984   //   Concat upper and lower parts.
19985   //
19986   //   v4i32 -> v4i64
19987   //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
19988   //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
19989   //   Concat upper and lower parts.
19990   //
19991   MVT HalfVT = VT.getHalfNumVectorElementsVT();
19992   SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19993 
19994   // Short-circuit if we can determine that each 128-bit half is the same value.
19995   // Otherwise, this is difficult to match and optimize.
19996   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19997     if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19998       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19999 
20000   SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
20001   SDValue Undef = DAG.getUNDEF(InVT);
20002   bool NeedZero = Opc == ISD::ZERO_EXTEND;
20003   SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
20004   OpHi = DAG.getBitcast(HalfVT, OpHi);
20005 
20006   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
20007 }
20008 
20009 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
20010 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
20011                                    const SDLoc &dl, SelectionDAG &DAG) {
20012   assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
20013   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20014                            DAG.getIntPtrConstant(0, dl));
20015   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20016                            DAG.getIntPtrConstant(8, dl));
20017   Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
20018   Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
20019   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
20020   return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20021 }
20022 
static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
20026   MVT VT = Op->getSimpleValueType(0);
20027   SDValue In = Op->getOperand(0);
20028   MVT InVT = In.getSimpleValueType();
20029   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20030   SDLoc DL(Op);
20031   unsigned NumElts = VT.getVectorNumElements();
20032 
  // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
  // avoids a constant pool load.
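  // E.g. (a sketch) zero_extend v8i1 %k to v8i32 becomes:
  //   %s = sign_extend %k to v8i32   ; each lane is 0 or -1
  //   %r = srl %s, 31                ; each lane is 0 or 1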
20035   if (VT.getVectorElementType() != MVT::i8) {
20036     SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
20037     return DAG.getNode(ISD::SRL, DL, VT, Extend,
20038                        DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
20039   }
20040 
20041   // Extend VT if BWI is not supported.
20042   MVT ExtVT = VT;
20043   if (!Subtarget.hasBWI()) {
20044     // If v16i32 is to be avoided, we'll need to split and concatenate.
20045     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20046       return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
20047 
20048     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20049   }
20050 
20051   // Widen to 512-bits if VLX is not supported.
20052   MVT WideVT = ExtVT;
20053   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20054     NumElts *= 512 / ExtVT.getSizeInBits();
20055     InVT = MVT::getVectorVT(MVT::i1, NumElts);
20056     In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
20057                      In, DAG.getIntPtrConstant(0, DL));
20058     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
20059                               NumElts);
20060   }
20061 
20062   SDValue One = DAG.getConstant(1, DL, WideVT);
20063   SDValue Zero = DAG.getConstant(0, DL, WideVT);
20064 
20065   SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
20066 
20067   // Truncate if we had to extend above.
20068   if (VT != ExtVT) {
20069     WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20070     SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
20071   }
20072 
20073   // Extract back to 128/256-bit if we widened.
20074   if (WideVT != VT)
20075     SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
20076                               DAG.getIntPtrConstant(0, DL));
20077 
20078   return SelectedVal;
20079 }
20080 
20081 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20082                                 SelectionDAG &DAG) {
20083   SDValue In = Op.getOperand(0);
20084   MVT SVT = In.getSimpleValueType();
20085 
20086   if (SVT.getVectorElementType() == MVT::i1)
20087     return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
20088 
20089   assert(Subtarget.hasAVX() && "Expected AVX support");
20090   return LowerAVXExtend(Op, DAG, Subtarget);
20091 }
20092 
20093 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
20094 /// It makes use of the fact that vectors with enough leading sign/zero bits
20095 /// prevent the PACKSS/PACKUS from saturating the results.
20096 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
20097 /// within each 128-bit lane.
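/// For example (a sketch), truncating v8i32 to v8i8 with PACKSS on SSE2 takes
/// two stages:
///   packssdw lo128, hi128   ; v8i32 -> v8i16
///   packsswb x, x           ; v8i16 -> v8i8 in the low 64 bits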
20098 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
20099                                       const SDLoc &DL, SelectionDAG &DAG,
20100                                       const X86Subtarget &Subtarget) {
20101   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
20102          "Unexpected PACK opcode");
20103   assert(DstVT.isVector() && "VT not a vector?");
20104 
20105   // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
20106   if (!Subtarget.hasSSE2())
20107     return SDValue();
20108 
20109   EVT SrcVT = In.getValueType();
20110 
  // No truncation required; we might get here due to recursive calls.
20112   if (SrcVT == DstVT)
20113     return In;
20114 
20115   unsigned NumElems = SrcVT.getVectorNumElements();
  if (NumElems < 2 || !isPowerOf2_32(NumElems))
20117     return SDValue();
20118 
20119   unsigned DstSizeInBits = DstVT.getSizeInBits();
20120   unsigned SrcSizeInBits = SrcVT.getSizeInBits();
20121   assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
20122   assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
20123 
20124   LLVMContext &Ctx = *DAG.getContext();
20125   EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
20126   EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20127 
20128   // Pack to the largest type possible:
20129   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
20130   EVT InVT = MVT::i16, OutVT = MVT::i8;
20131   if (SrcVT.getScalarSizeInBits() > 16 &&
20132       (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
20133     InVT = MVT::i32;
20134     OutVT = MVT::i16;
20135   }
20136 
20137   // Sub-128-bit truncation - widen to 128-bit src and pack in the lower half.
20138   // On pre-AVX512, pack the src in both halves to help value tracking.
20139   if (SrcSizeInBits <= 128) {
20140     InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
20141     OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
20142     In = widenSubVector(In, false, Subtarget, DAG, DL, 128);
20143     SDValue LHS = DAG.getBitcast(InVT, In);
20144     SDValue RHS = Subtarget.hasAVX512() ? DAG.getUNDEF(InVT) : LHS;
20145     SDValue Res = DAG.getNode(Opcode, DL, OutVT, LHS, RHS);
20146     Res = extractSubVector(Res, 0, DAG, DL, SrcSizeInBits / 2);
20147     Res = DAG.getBitcast(PackedVT, Res);
20148     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20149   }
20150 
20151   // Split lower/upper subvectors.
20152   SDValue Lo, Hi;
20153   std::tie(Lo, Hi) = splitVector(In, DAG, DL);
20154 
20155   // If Hi is undef, then don't bother packing it and widen the result instead.
20156   if (Hi.isUndef()) {
20157     EVT DstHalfVT = DstVT.getHalfNumVectorElementsVT(Ctx);
20158     if (SDValue Res =
20159             truncateVectorWithPACK(Opcode, DstHalfVT, Lo, DL, DAG, Subtarget))
20160       return widenSubVector(Res, false, Subtarget, DAG, DL, DstSizeInBits);
20161   }
20162 
20163   unsigned SubSizeInBits = SrcSizeInBits / 2;
20164   InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
20165   OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
20166 
20167   // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
20168   if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
20169     Lo = DAG.getBitcast(InVT, Lo);
20170     Hi = DAG.getBitcast(InVT, Hi);
20171     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20172     return DAG.getBitcast(DstVT, Res);
20173   }
20174 
20175   // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
20176   // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
20177   if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
20178     Lo = DAG.getBitcast(InVT, Lo);
20179     Hi = DAG.getBitcast(InVT, Hi);
20180     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20181 
20182     // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
20183     // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
20184     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
20185     SmallVector<int, 64> Mask;
20186     int Scale = 64 / OutVT.getScalarSizeInBits();
20187     narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
20188     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
20189 
20190     if (DstVT.is256BitVector())
20191       return DAG.getBitcast(DstVT, Res);
20192 
20193     // If 512bit -> 128bit truncate another stage.
20194     Res = DAG.getBitcast(PackedVT, Res);
20195     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20196   }
20197 
20198   // Recursively pack lower/upper subvectors, concat result and pack again.
20199   assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
20200 
20201   if (PackedVT.is128BitVector()) {
20202     // Avoid CONCAT_VECTORS on sub-128bit nodes as these can fail after
20203     // type legalization.
20204     SDValue Res =
20205         truncateVectorWithPACK(Opcode, PackedVT, In, DL, DAG, Subtarget);
20206     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20207   }
20208 
20209   EVT HalfPackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
20210   Lo = truncateVectorWithPACK(Opcode, HalfPackedVT, Lo, DL, DAG, Subtarget);
20211   Hi = truncateVectorWithPACK(Opcode, HalfPackedVT, Hi, DL, DAG, Subtarget);
20212   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
20213   return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20214 }
20215 
20216 /// Truncate using inreg zero extension (AND mask) and X86ISD::PACKUS.
20217 /// e.g. trunc <8 x i32> X to <8 x i16> -->
20218 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
20219 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
20220 static SDValue truncateVectorWithPACKUS(EVT DstVT, SDValue In, const SDLoc &DL,
20221                                         const X86Subtarget &Subtarget,
20222                                         SelectionDAG &DAG) {
20223   In = DAG.getZeroExtendInReg(In, DL, DstVT);
20224   return truncateVectorWithPACK(X86ISD::PACKUS, DstVT, In, DL, DAG, Subtarget);
20225 }
20226 
20227 /// Truncate using inreg sign extension and X86ISD::PACKSS.
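/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// SignX = sext_inreg X, i16 (replicate bit 15 into the upper bits)
/// packss (extract_subv SignX, 0), (extract_subv SignX, 1)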
20228 static SDValue truncateVectorWithPACKSS(EVT DstVT, SDValue In, const SDLoc &DL,
20229                                         const X86Subtarget &Subtarget,
20230                                         SelectionDAG &DAG) {
20231   EVT SrcVT = In.getValueType();
20232   In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SrcVT, In,
20233                    DAG.getValueType(DstVT));
20234   return truncateVectorWithPACK(X86ISD::PACKSS, DstVT, In, DL, DAG, Subtarget);
20235 }
20236 
20237 /// Helper to determine if \p In truncated to \p DstVT has the necessary
20238 /// signbits / leading zero bits to be truncated with PACKSS / PACKUS,
20239 /// possibly by converting a SRL node to SRA for sign extension.
20240 static SDValue matchTruncateWithPACK(unsigned &PackOpcode, EVT DstVT,
20241                                      SDValue In, const SDLoc &DL,
20242                                      SelectionDAG &DAG,
20243                                      const X86Subtarget &Subtarget) {
20244   // Requires SSE2.
20245   if (!Subtarget.hasSSE2())
20246     return SDValue();
20247 
20248   EVT SrcVT = In.getValueType();
20249   EVT DstSVT = DstVT.getVectorElementType();
20250   EVT SrcSVT = SrcVT.getVectorElementType();
20251 
20252   // Check we have a truncation suited for PACKSS/PACKUS.
20253   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20254         (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20255     return SDValue();
20256 
20257   assert(SrcSVT.getSizeInBits() > DstSVT.getSizeInBits() && "Bad truncation");
20258   unsigned NumStages = Log2_32(SrcSVT.getSizeInBits() / DstSVT.getSizeInBits());
20259 
20260   // Truncation from 128-bit to vXi32 can be better handled with PSHUFD.
20261   // Truncation to sub-64-bit vXi16 can be better handled with PSHUFD/PSHUFLW.
20262   // Truncation from v2i64 to v2i8 can be better handled with PSHUFB.
20263   if ((DstSVT == MVT::i32 && SrcVT.getSizeInBits() <= 128) ||
20264       (DstSVT == MVT::i16 && SrcVT.getSizeInBits() <= (64 * NumStages)) ||
20265       (DstVT == MVT::v2i8 && SrcVT == MVT::v2i64 && Subtarget.hasSSSE3()))
20266     return SDValue();
20267 
20268   // Prefer to lower v4i64 -> v4i32 as a shuffle unless we can cheaply
20269   // split this for packing.
20270   if (SrcVT == MVT::v4i64 && DstVT == MVT::v4i32 &&
20271       !isFreeToSplitVector(In.getNode(), DAG) &&
20272       (!Subtarget.hasAVX() || DAG.ComputeNumSignBits(In) != 64))
20273     return SDValue();
20274 
  // On AVX512 targets, don't lower the truncation as multiple PACK stages.
20276   if (Subtarget.hasAVX512() && NumStages > 1)
20277     return SDValue();
20278 
20279   unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
20280   unsigned NumPackedSignBits = std::min<unsigned>(DstSVT.getSizeInBits(), 16);
20281   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
20282 
20283   // Truncate with PACKUS if we are truncating a vector with leading zero
20284   // bits that extend all the way to the packed/truncated value.
20285   // e.g. Masks, zext_in_reg, etc.
20286   // Pre-SSE41 we can only use PACKUSWB.
20287   KnownBits Known = DAG.computeKnownBits(In);
20288   if ((NumSrcEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros()) {
20289     PackOpcode = X86ISD::PACKUS;
20290     return In;
20291   }
20292 
20293   // Truncate with PACKSS if we are truncating a vector with sign-bits
20294   // that extend all the way to the packed/truncated value.
20295   // e.g. Comparison result, sext_in_reg, etc.
20296   unsigned NumSignBits = DAG.ComputeNumSignBits(In);
20297 
20298   // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
20299   // a sign splat (or AVX512 VPSRAQ support). ComputeNumSignBits struggles to
20300   // see through BITCASTs later on and combines/simplifications can't then use
20301   // it.
20302   if (DstSVT == MVT::i32 && NumSignBits != SrcSVT.getSizeInBits() &&
20303       !Subtarget.hasAVX512())
20304     return SDValue();
20305 
20306   unsigned MinSignBits = NumSrcEltBits - NumPackedSignBits;
20307   if (MinSignBits < NumSignBits) {
20308     PackOpcode = X86ISD::PACKSS;
20309     return In;
20310   }
20311 
20312   // If we have a srl that only generates signbits that we will discard in
20313   // the truncation then we can use PACKSS by converting the srl to a sra.
20314   // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
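  // E.g. when truncating (srl v4i32 X, 16) to v4i16, an sra by 16 produces
  // the same low 16 bits but guarantees each value fits in a signed i16, so
  // PACKSSDW does not saturate.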
20315   if (In.getOpcode() == ISD::SRL && In->hasOneUse())
20316     if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
20317             In, APInt::getAllOnes(SrcVT.getVectorNumElements()))) {
20318       if (*ShAmt == MinSignBits) {
20319         PackOpcode = X86ISD::PACKSS;
20320         return DAG.getNode(ISD::SRA, DL, SrcVT, In->ops());
20321       }
20322     }
20323 
20324   return SDValue();
20325 }
20326 
20327 /// This function lowers a vector truncation of 'extended sign-bits' or
20328 /// 'extended zero-bits' values.
20329 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
20330 static SDValue LowerTruncateVecPackWithSignBits(MVT DstVT, SDValue In,
20331                                                 const SDLoc &DL,
20332                                                 const X86Subtarget &Subtarget,
20333                                                 SelectionDAG &DAG) {
20334   MVT SrcVT = In.getSimpleValueType();
20335   MVT DstSVT = DstVT.getVectorElementType();
20336   MVT SrcSVT = SrcVT.getVectorElementType();
20337   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20338         (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20339     return SDValue();
20340 
20341   // If the upper half of the source is undef, then attempt to split and
20342   // only truncate the lower half.
20343   if (DstVT.getSizeInBits() >= 128) {
20344     SmallVector<SDValue> LowerOps;
20345     if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20346       MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20347       if (SDValue Res = LowerTruncateVecPackWithSignBits(DstHalfVT, Lo, DL,
20348                                                          Subtarget, DAG))
20349         return widenSubVector(Res, false, Subtarget, DAG, DL,
20350                               DstVT.getSizeInBits());
20351     }
20352   }
20353 
20354   unsigned PackOpcode;
20355   if (SDValue Src =
20356           matchTruncateWithPACK(PackOpcode, DstVT, In, DL, DAG, Subtarget))
20357     return truncateVectorWithPACK(PackOpcode, DstVT, Src, DL, DAG, Subtarget);
20358 
20359   return SDValue();
20360 }
20361 
20362 /// This function lowers a vector truncation from vXi32/vXi64 to vXi8/vXi16 into
20363 /// X86ISD::PACKUS/X86ISD::PACKSS operations.
20364 static SDValue LowerTruncateVecPack(MVT DstVT, SDValue In, const SDLoc &DL,
20365                                     const X86Subtarget &Subtarget,
20366                                     SelectionDAG &DAG) {
20367   MVT SrcVT = In.getSimpleValueType();
20368   MVT DstSVT = DstVT.getVectorElementType();
20369   MVT SrcSVT = SrcVT.getVectorElementType();
20370   unsigned NumElems = DstVT.getVectorNumElements();
20371   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20372         (DstSVT == MVT::i8 || DstSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
20373         NumElems >= 8))
20374     return SDValue();
20375 
  // SSSE3's pshufb results in fewer instructions in the cases below.
20377   if (Subtarget.hasSSSE3() && NumElems == 8) {
20378     if (SrcSVT == MVT::i16)
20379       return SDValue();
20380     if (SrcSVT == MVT::i32 && (DstSVT == MVT::i8 || !Subtarget.hasSSE41()))
20381       return SDValue();
20382   }
20383 
20384   // If the upper half of the source is undef, then attempt to split and
20385   // only truncate the lower half.
20386   if (DstVT.getSizeInBits() >= 128) {
20387     SmallVector<SDValue> LowerOps;
20388     if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20389       MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20390       if (SDValue Res = LowerTruncateVecPack(DstHalfVT, Lo, DL, Subtarget, DAG))
20391         return widenSubVector(Res, false, Subtarget, DAG, DL,
20392                               DstVT.getSizeInBits());
20393     }
20394   }
20395 
20396   // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
20397   // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
20398   // truncate 2 x v4i32 to v8i16.
20399   if (Subtarget.hasSSE41() || DstSVT == MVT::i8)
20400     return truncateVectorWithPACKUS(DstVT, In, DL, Subtarget, DAG);
20401 
20402   if (SrcSVT == MVT::i16 || SrcSVT == MVT::i32)
20403     return truncateVectorWithPACKSS(DstVT, In, DL, Subtarget, DAG);
20404 
20405   // Special case vXi64 -> vXi16, shuffle to vXi32 and then use PACKSS.
20406   if (DstSVT == MVT::i16 && SrcSVT == MVT::i64) {
20407     MVT TruncVT = MVT::getVectorVT(MVT::i32, NumElems);
20408     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, In);
20409     return truncateVectorWithPACKSS(DstVT, Trunc, DL, Subtarget, DAG);
20410   }
20411 
20412   return SDValue();
20413 }
20414 
20415 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
20416                                   const X86Subtarget &Subtarget) {
20417 
20418   SDLoc DL(Op);
20419   MVT VT = Op.getSimpleValueType();
20420   SDValue In = Op.getOperand(0);
20421   MVT InVT = In.getSimpleValueType();
20422 
20423   assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
20424 
20425   // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
20426   unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
20427   if (InVT.getScalarSizeInBits() <= 16) {
20428     if (Subtarget.hasBWI()) {
20429       // legal, will go to VPMOVB2M, VPMOVW2M
20430       if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20431         // We need to shift to get the lsb into sign position.
        // Shifting packed bytes is not supported natively, so bitcast to words.
20433         MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
20434         In = DAG.getNode(ISD::SHL, DL, ExtVT,
20435                          DAG.getBitcast(ExtVT, In),
20436                          DAG.getConstant(ShiftInx, DL, ExtVT));
20437         In = DAG.getBitcast(InVT, In);
20438       }
20439       return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
20440                           In, ISD::SETGT);
20441     }
    // Use TESTD/Q: extend the vector to packed dword/qword elements.
20443     assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
20444            "Unexpected vector type.");
20445     unsigned NumElts = InVT.getVectorNumElements();
20446     assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20447     // We need to change to a wider element type that we have support for.
20448     // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
20449     // For 16 element vectors we extend to v16i32 unless we are explicitly
20450     // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
20451     // we need to split into two 8 element vectors which we can extend to v8i32,
20452     // truncate and concat the results. There's an additional complication if
20453     // the original type is v16i8. In that case we can't split the v16i8
20454     // directly, so we need to shuffle high elements to low and use
20455     // sign_extend_vector_inreg.
20456     if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20457       SDValue Lo, Hi;
20458       if (InVT == MVT::v16i8) {
20459         Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
20460         Hi = DAG.getVectorShuffle(
20461             InVT, DL, In, In,
20462             {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
20463         Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
20464       } else {
20465         assert(InVT == MVT::v16i16 && "Unexpected VT!");
20466         Lo = extract128BitVector(In, 0, DAG, DL);
20467         Hi = extract128BitVector(In, 8, DAG, DL);
20468       }
20469       // We're split now, just emit two truncates and a concat. The two
20470       // truncates will trigger legalization to come back to this function.
20471       Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
20472       Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
20473       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20474     }
20475     // We either have 8 elements or we're allowed to use 512-bit vectors.
20476     // If we have VLX, we want to use the narrowest vector that can get the
20477     // job done so we use vXi32.
20478     MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20479     MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
20480     In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
20481     InVT = ExtVT;
20482     ShiftInx = InVT.getScalarSizeInBits() - 1;
20483   }
20484 
20485   if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20486     // We need to shift to get the lsb into sign position.
20487     In = DAG.getNode(ISD::SHL, DL, InVT, In,
20488                      DAG.getConstant(ShiftInx, DL, InVT));
20489   }
20490   // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
20491   if (Subtarget.hasDQI())
20492     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
20493   return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
20494 }
20495 
20496 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
20497   SDLoc DL(Op);
20498   MVT VT = Op.getSimpleValueType();
20499   SDValue In = Op.getOperand(0);
20500   MVT InVT = In.getSimpleValueType();
20501   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20502          "Invalid TRUNCATE operation");
20503 
20504   // If we're called by the type legalizer, handle a few cases.
20505   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20506   if (!TLI.isTypeLegal(VT) || !TLI.isTypeLegal(InVT)) {
20507     if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
20508         VT.is128BitVector() && Subtarget.hasAVX512()) {
20509       assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
20510              "Unexpected subtarget!");
20511       // The default behavior is to truncate one step, concatenate, and then
20512       // truncate the remainder. We'd rather produce two 64-bit results and
20513       // concatenate those.
20514       SDValue Lo, Hi;
20515       std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
20516 
20517       EVT LoVT, HiVT;
20518       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
20519 
20520       Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
20521       Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
20522       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20523     }
20524 
20525     // Pre-AVX512 (or prefer-256bit) see if we can make use of PACKSS/PACKUS.
20526     if (!Subtarget.hasAVX512() ||
20527         (InVT.is512BitVector() && VT.is256BitVector()))
20528       if (SDValue SignPack =
20529               LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20530         return SignPack;
20531 
20532     // Pre-AVX512 see if we can make use of PACKSS/PACKUS.
20533     if (!Subtarget.hasAVX512())
20534       return LowerTruncateVecPack(VT, In, DL, Subtarget, DAG);
20535 
20536     // Otherwise let default legalization handle it.
20537     return SDValue();
20538   }
20539 
20540   if (VT.getVectorElementType() == MVT::i1)
20541     return LowerTruncateVecI1(Op, DAG, Subtarget);
20542 
20543   // Attempt to truncate with PACKUS/PACKSS even on AVX512 if we'd have to
20544   // concat from subvectors to use VPTRUNC etc.
20545   if (!Subtarget.hasAVX512() || isFreeToSplitVector(In.getNode(), DAG))
20546     if (SDValue SignPack =
20547             LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20548       return SignPack;
20549 
20550   // vpmovqb/w/d, vpmovdb/w, vpmovwb
20551   if (Subtarget.hasAVX512()) {
20552     if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
20553       assert(VT == MVT::v32i8 && "Unexpected VT!");
20554       return splitVectorIntUnary(Op, DAG);
20555     }
20556 
20557     // Word to byte is only supported with BWI. Otherwise we have to promote
20558     // to v16i32 and then truncate that. But we should only do that if we
20559     // haven't been asked to avoid 512-bit vectors. The actual promotion to
20560     // v16i32 will be handled by isel patterns.
20561     if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
20562         Subtarget.canExtendTo512DQ())
20563       return Op;
20564   }
20565 
20566   // Handle truncation of V256 to V128 using shuffles.
20567   assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
20568 
20569   if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
20570     // On AVX2, v4i64 -> v4i32 becomes VPERMD.
20571     if (Subtarget.hasInt256()) {
20572       static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
20573       In = DAG.getBitcast(MVT::v8i32, In);
20574       In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
20575       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
20576                          DAG.getIntPtrConstant(0, DL));
20577     }
20578 
20579     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20580                                DAG.getIntPtrConstant(0, DL));
20581     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20582                                DAG.getIntPtrConstant(2, DL));
20583     static const int ShufMask[] = {0, 2, 4, 6};
20584     return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
20585                                 DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
20586   }
20587 
20588   if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
20589     // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
20590     if (Subtarget.hasInt256()) {
20591       // The PSHUFB mask:
20592       static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
20593                                       -1, -1, -1, -1, -1, -1, -1, -1,
20594                                       16, 17, 20, 21, 24, 25, 28, 29,
20595                                       -1, -1, -1, -1, -1, -1, -1, -1 };
20596       In = DAG.getBitcast(MVT::v32i8, In);
20597       In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20598       In = DAG.getBitcast(MVT::v4i64, In);
20599 
20600       static const int ShufMask2[] = {0, 2, -1, -1};
20601       In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20602       In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20603                        DAG.getIntPtrConstant(0, DL));
20604       return DAG.getBitcast(MVT::v8i16, In);
20605     }
20606 
20607     return Subtarget.hasSSE41()
20608                ? truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG)
20609                : truncateVectorWithPACKSS(VT, In, DL, Subtarget, DAG);
20610   }
20611 
20612   if (VT == MVT::v16i8 && InVT == MVT::v16i16)
20613     return truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG);
20614 
20615   llvm_unreachable("All 256->128 cases should have been handled above!");
20616 }
20617 
20618 // We can leverage the specific way the "cvttps2dq/cvttpd2dq" instructions
20619 // behave on out-of-range inputs to generate optimized conversions.
20620 static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
20621                                     SelectionDAG &DAG,
20622                                     const X86Subtarget &Subtarget) {
20623   MVT SrcVT = Src.getSimpleValueType();
20624   unsigned DstBits = VT.getScalarSizeInBits();
20625   assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");
20626 
20627   // Calculate the converted result for values in the range 0 to
20628   // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20629   SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
20630   SDValue Big =
20631       DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
20632                   DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
20633                               DAG.getConstantFP(2147483648.0f, dl, SrcVT)));
20634 
20635   // The "CVTTP2SI" instruction conveniently sets the sign bit if
20636   // and only if the value was out of range. So we can use that
20637   // as our indicator that we should use "Big" instead of "Small".
20638   //
20639   // Use "Small" if "IsOverflown" has all bits cleared
20640   // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
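  // For example, for an f32 lane holding 3.0e9f: "Small" saturates to the
  // integer indefinite value 0x80000000, "Big" is (int)(3.0e9f - 2^31) =
  // 0x32D05E00, and sign-splatting "Small" gives all-ones, so the selects
  // below produce 0x80000000 | 0x32D05E00 = 0xB2D05E00, i.e. 3000000000.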
20641 
20642   // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
20643   // use the slightly slower blendv select instead.
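  // (256-bit integer arithmetic shifts need AVX2; BLENDV instead keys off
  // the sign bit of "Small" directly.)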
20644   if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
20645     SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
20646     return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
20647   }
20648 
20649   SDValue IsOverflown =
20650       DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
20651                   DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
20652   return DAG.getNode(ISD::OR, dl, VT, Small,
20653                      DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20654 }
20655 
20656 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20657   bool IsStrict = Op->isStrictFPOpcode();
20658   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20659                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20660   MVT VT = Op->getSimpleValueType(0);
20661   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20662   SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
20663   MVT SrcVT = Src.getSimpleValueType();
20664   SDLoc dl(Op);
20665 
20666   SDValue Res;
20667   if (isSoftF16(SrcVT, Subtarget)) {
20668     MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
20669     if (IsStrict)
20670       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
20671                          {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
20672                                              {NVT, MVT::Other}, {Chain, Src})});
20673     return DAG.getNode(Op.getOpcode(), dl, VT,
20674                        DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
20675   } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
20676     return Op;
20677   }
20678 
20679   if (VT.isVector()) {
20680     if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20681       MVT ResVT = MVT::v4i32;
20682       MVT TruncVT = MVT::v4i1;
20683       unsigned Opc;
20684       if (IsStrict)
20685         Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20686       else
20687         Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20688 
20689       if (!IsSigned && !Subtarget.hasVLX()) {
20690         assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20691         // Widen to 512-bits.
20692         ResVT = MVT::v8i32;
20693         TruncVT = MVT::v8i1;
20694         Opc = Op.getOpcode();
20695         // Need to concat with zero vector for strict fp to avoid spurious
20696         // exceptions.
20697         // TODO: Should we just do this for non-strict as well?
20698         SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20699                                : DAG.getUNDEF(MVT::v8f64);
20700         Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20701                           DAG.getIntPtrConstant(0, dl));
20702       }
20703       if (IsStrict) {
20704         Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
20705         Chain = Res.getValue(1);
20706       } else {
20707         Res = DAG.getNode(Opc, dl, ResVT, Src);
20708       }
20709 
20710       Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20711       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20712                         DAG.getIntPtrConstant(0, dl));
20713       if (IsStrict)
20714         return DAG.getMergeValues({Res, Chain}, dl);
20715       return Res;
20716     }
20717 
20718     if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
20719       if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
20720         return Op;
20721 
20722       MVT ResVT = VT;
20723       MVT EleVT = VT.getVectorElementType();
20724       if (EleVT != MVT::i64)
20725         ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
20726 
20727       if (SrcVT != MVT::v8f16) {
20728         SDValue Tmp =
20729             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
20730         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
20731         Ops[0] = Src;
20732         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
20733       }
20734 
20735       if (IsStrict) {
20736         Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
20737                                    : X86ISD::STRICT_CVTTP2UI,
20738                           dl, {ResVT, MVT::Other}, {Chain, Src});
20739         Chain = Res.getValue(1);
20740       } else {
20741         Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
20742                           ResVT, Src);
20743       }
20744 
20745       // TODO: Need to add exception check code for strict FP.
20746       if (EleVT.getSizeInBits() < 16) {
20747         ResVT = MVT::getVectorVT(EleVT, 8);
20748         Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
20749       }
20750 
20751       if (ResVT != VT)
20752         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20753                           DAG.getIntPtrConstant(0, dl));
20754 
20755       if (IsStrict)
20756         return DAG.getMergeValues({Res, Chain}, dl);
20757       return Res;
20758     }
20759 
20760     // v8f32/v16f32/v8f64->v8i16/v16i16 need to widen first.
20761     if (VT.getVectorElementType() == MVT::i16) {
20762       assert((SrcVT.getVectorElementType() == MVT::f32 ||
20763               SrcVT.getVectorElementType() == MVT::f64) &&
20764              "Expected f32/f64 vector!");
20765       MVT NVT = VT.changeVectorElementType(MVT::i32);
20766       if (IsStrict) {
20767         Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
20768                                    : ISD::STRICT_FP_TO_UINT,
20769                           dl, {NVT, MVT::Other}, {Chain, Src});
20770         Chain = Res.getValue(1);
20771       } else {
20772         Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
20773                           NVT, Src);
20774       }
20775 
20776       // TODO: Need to add exception check code for strict FP.
20777       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20778 
20779       if (IsStrict)
20780         return DAG.getMergeValues({Res, Chain}, dl);
20781       return Res;
20782     }
20783 
20784     // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20785     if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20786       assert(!IsSigned && "Expected unsigned conversion!");
20787       assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20788       return Op;
20789     }
20790 
20791     // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20792     if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20793         (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
20794         Subtarget.useAVX512Regs()) {
20795       assert(!IsSigned && "Expected unsigned conversion!");
20796       assert(!Subtarget.hasVLX() && "Unexpected features!");
20797       MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20798       MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20799       // Need to concat with zero vector for strict fp to avoid spurious
20800       // exceptions.
20801       // TODO: Should we just do this for non-strict as well?
20802       SDValue Tmp =
20803           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20804       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20805                         DAG.getIntPtrConstant(0, dl));
20806 
20807       if (IsStrict) {
20808         Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20809                           {Chain, Src});
20810         Chain = Res.getValue(1);
20811       } else {
20812         Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20813       }
20814 
20815       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20816                         DAG.getIntPtrConstant(0, dl));
20817 
20818       if (IsStrict)
20819         return DAG.getMergeValues({Res, Chain}, dl);
20820       return Res;
20821     }
20822 
20823     // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20824     if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20825         (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
20826         Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
20827       assert(!Subtarget.hasVLX() && "Unexpected features!");
20828       MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20829       // Need to concat with zero vector for strict fp to avoid spurious
20830       // exceptions.
20831       // TODO: Should we just do this for non-strict as well?
20832       SDValue Tmp =
20833           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20834       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20835                         DAG.getIntPtrConstant(0, dl));
20836 
20837       if (IsStrict) {
20838         Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20839                           {Chain, Src});
20840         Chain = Res.getValue(1);
20841       } else {
20842         Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20843       }
20844 
20845       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20846                         DAG.getIntPtrConstant(0, dl));
20847 
20848       if (IsStrict)
20849         return DAG.getMergeValues({Res, Chain}, dl);
20850       return Res;
20851     }
20852 
20853     if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20854       if (!Subtarget.hasVLX()) {
20855         // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the
20856         // type legalizer and then widened again by vector op legalization.
20857         if (!IsStrict)
20858           return SDValue();
20859 
20860         SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
20861         SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
20862                                   {Src, Zero, Zero, Zero});
20863         Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20864                           {Chain, Tmp});
20865         SDValue Chain = Tmp.getValue(1);
20866         Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
20867                           DAG.getIntPtrConstant(0, dl));
20868         return DAG.getMergeValues({Tmp, Chain}, dl);
20869       }
20870 
20871       assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20872       SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20873                                 DAG.getUNDEF(MVT::v2f32));
20874       if (IsStrict) {
20875         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20876                                 : X86ISD::STRICT_CVTTP2UI;
20877         return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20878       }
20879       unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20880       return DAG.getNode(Opc, dl, VT, Tmp);
20881     }
20882 
20883     // Generate optimized instructions for pre-AVX512 unsigned conversions
20884     // from vXf32/vXf64 to vXi32.
20885     if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
20886         (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
20887         (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
20888       assert(!IsSigned && "Expected unsigned conversion!");
20889       return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
20890     }
20891 
20892     return SDValue();
20893   }
20894 
20895   assert(!VT.isVector());
20896 
20897   bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20898 
20899   if (!IsSigned && UseSSEReg) {
20900     // Conversions from f32/f64 with AVX512 should be legal.
20901     if (Subtarget.hasAVX512())
20902       return Op;
20903 
20904     // We can leverage the specific way the "cvttss2si/cvttsd2si" instructions
20905     // behave on out-of-range inputs to generate optimized conversions.
20906     if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
20907                       (VT == MVT::i64 && Subtarget.is64Bit()))) {
20908       unsigned DstBits = VT.getScalarSizeInBits();
20909       APInt UIntLimit = APInt::getSignMask(DstBits);
20910       SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
20911                                         DAG.getConstant(UIntLimit, dl, VT));
20912       MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());
20913 
20914       // Calculate the converted result for values in the range:
20915       // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20916       // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
20917       SDValue Small =
20918           DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
20919                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
20920       SDValue Big = DAG.getNode(
20921           X86ISD::CVTTS2SI, dl, VT,
20922           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
20923                       DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));
20924 
20925       // The "CVTTS2SI" instruction conveniently sets the sign bit if
20926       // and only if the value was out of range. So we can use that
20927       // as our indicator that we should use "Big" instead of "Small".
20928       //
20929       // Use "Small" if "IsOverflown" has all bits cleared
20930       // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
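      // For example, converting 2^63 + 2^40 (exactly representable in f32) to
      // i64 on a 64-bit target: "Small" saturates to 0x8000000000000000,
      // "Big" is 2^40, and the arithmetic shift of "Small" is all-ones, so
      // the result is 0x8000000000000000 | 2^40, i.e. the original value.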
20931       SDValue IsOverflown = DAG.getNode(
20932           ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
20933       return DAG.getNode(ISD::OR, dl, VT, Small,
20934                          DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20935     }
20936 
20937     // Use default expansion for i64.
20938     if (VT == MVT::i64)
20939       return SDValue();
20940 
20941     assert(VT == MVT::i32 && "Unexpected VT!");
20942 
20943     // Promote i32 to i64 and use a signed operation on 64-bit targets.
20944     // FIXME: This does not generate an invalid exception if the input does not
20945     // fit in i32. PR44019
20946     if (Subtarget.is64Bit()) {
20947       if (IsStrict) {
20948         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
20949                           {Chain, Src});
20950         Chain = Res.getValue(1);
20951       } else
20952         Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20953 
20954       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20955       if (IsStrict)
20956         return DAG.getMergeValues({Res, Chain}, dl);
20957       return Res;
20958     }
20959 
20960     // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20961     // use fisttp which will be handled later.
20962     if (!Subtarget.hasSSE3())
20963       return SDValue();
20964   }
20965 
20966   // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20967   // FIXME: This does not generate an invalid exception if the input does not
20968   // fit in i16. PR44019
20969   if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20970     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20971     if (IsStrict) {
20972       Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
20973                         {Chain, Src});
20974       Chain = Res.getValue(1);
20975     } else
20976       Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20977 
20978     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20979     if (IsStrict)
20980       return DAG.getMergeValues({Res, Chain}, dl);
20981     return Res;
20982   }
20983 
20984   // If this is a FP_TO_SINT using SSEReg we're done.
20985   if (UseSSEReg && IsSigned)
20986     return Op;
20987 
20988   // fp128 needs to use a libcall.
20989   if (SrcVT == MVT::f128) {
20990     RTLIB::Libcall LC;
20991     if (IsSigned)
20992       LC = RTLIB::getFPTOSINT(SrcVT, VT);
20993     else
20994       LC = RTLIB::getFPTOUINT(SrcVT, VT);
20995 
20996     MakeLibCallOptions CallOptions;
20997     std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20998                                                   SDLoc(Op), Chain);
20999 
21000     if (IsStrict)
21001       return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
21002 
21003     return Tmp.first;
21004   }
21005 
21006   // Fall back to X87.
21007   if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
21008     if (IsStrict)
21009       return DAG.getMergeValues({V, Chain}, dl);
21010     return V;
21011   }
21012 
21013   llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
21014 }
21015 
21016 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
21017                                              SelectionDAG &DAG) const {
21018   SDValue Src = Op.getOperand(0);
21019   MVT SrcVT = Src.getSimpleValueType();
21020 
21021   if (SrcVT == MVT::f16)
21022     return SDValue();
21023 
21024   // If the source is in an SSE register, the node is Legal.
21025   if (isScalarFPTypeInSSEReg(SrcVT))
21026     return Op;
21027 
21028   return LRINT_LLRINTHelper(Op.getNode(), DAG);
21029 }
21030 
21031 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
21032                                               SelectionDAG &DAG) const {
21033   EVT DstVT = N->getValueType(0);
21034   SDValue Src = N->getOperand(0);
21035   EVT SrcVT = Src.getValueType();
21036 
21037   if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
21038     // f16 must be promoted before using the lowering in this routine.
21039     // fp128 does not use this lowering.
21040     return SDValue();
21041   }
21042 
21043   SDLoc DL(N);
21044   SDValue Chain = DAG.getEntryNode();
21045 
21046   bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
21047 
21048   // If we're converting from SSE, the stack slot needs to hold both types.
21049   // Otherwise it only needs to hold the DstVT.
21050   EVT OtherVT = UseSSE ? SrcVT : DstVT;
21051   SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
21052   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
21053   MachinePointerInfo MPI =
21054       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
21055 
21056   if (UseSSE) {
21057     assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
21058     Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
21059     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
21060     SDValue Ops[] = { Chain, StackPtr };
21061 
21062     Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
21063                                   /*Align*/ std::nullopt,
21064                                   MachineMemOperand::MOLoad);
21065     Chain = Src.getValue(1);
21066   }
21067 
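  // The FIST node stores Src, converted to a DstVT integer using the current
  // rounding mode (as lrint/llrint require), into the stack slot; the final
  // load yields the scalar result.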
21068   SDValue StoreOps[] = { Chain, Src, StackPtr };
21069   Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
21070                                   StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
21071                                   MachineMemOperand::MOStore);
21072 
21073   return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
21074 }
21075 
21076 SDValue
21077 X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
21078   // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
21079   // but making use of X86 specifics to produce better instruction sequences.
21080   SDNode *Node = Op.getNode();
21081   bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
21082   unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
21083   SDLoc dl(SDValue(Node, 0));
21084   SDValue Src = Node->getOperand(0);
21085 
21086   // There are three types involved here: SrcVT is the source floating point
21087   // type, DstVT is the type of the result, and TmpVT is the result of the
21088   // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
21089   // DstVT).
21090   EVT SrcVT = Src.getValueType();
21091   EVT DstVT = Node->getValueType(0);
21092   EVT TmpVT = DstVT;
21093 
21094   // This code is only for floats and doubles. Fall back to generic code for
21095   // anything else.
21096   if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftF16(SrcVT, Subtarget))
21097     return SDValue();
21098 
21099   EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
21100   unsigned SatWidth = SatVT.getScalarSizeInBits();
21101   unsigned DstWidth = DstVT.getScalarSizeInBits();
21102   unsigned TmpWidth = TmpVT.getScalarSizeInBits();
21103   assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
21104          "Expected saturation width smaller than result width");
21105 
21106   // Promote result of FP_TO_*INT to at least 32 bits.
21107   if (TmpWidth < 32) {
21108     TmpVT = MVT::i32;
21109     TmpWidth = 32;
21110   }
21111 
21112   // Promote conversions to unsigned 32-bit to 64-bit, because it will allow
21113   // us to use a native signed conversion instead.
21114   if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
21115     TmpVT = MVT::i64;
21116     TmpWidth = 64;
21117   }
21118 
21119   // If the saturation width is smaller than the size of the temporary result,
21120   // we can always use signed conversion, which is native.
21121   if (SatWidth < TmpWidth)
21122     FpToIntOpcode = ISD::FP_TO_SINT;
21123 
21124   // Determine minimum and maximum integer values and their corresponding
21125   // floating-point values.
21126   APInt MinInt, MaxInt;
21127   if (IsSigned) {
21128     MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
21129     MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
21130   } else {
21131     MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
21132     MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
21133   }
21134 
21135   APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21136   APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21137 
21138   APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
21139     MinInt, IsSigned, APFloat::rmTowardZero);
21140   APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
21141     MaxInt, IsSigned, APFloat::rmTowardZero);
21142   bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
21143                           && !(MaxStatus & APFloat::opStatus::opInexact);
21144 
21145   SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
21146   SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
21147 
21148   // If the integer bounds are exactly representable as floats, emit a
21149   // min+max+fptoi sequence. Otherwise use comparisons and selects.
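  // For example, a signed f32 -> i8 saturating conversion roughly becomes
  // maxss(-128.0, x), minss(127.0, ...), a cvttss2si to i32 and a truncate
  // to i8; a NaN input propagates through the clamps, converts to the
  // integer indefinite value and truncates to 0.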
21150   if (AreExactFloatBounds) {
21151     if (DstVT != TmpVT) {
21152       // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
21153       SDValue MinClamped = DAG.getNode(
21154         X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
21155       // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
21156       SDValue BothClamped = DAG.getNode(
21157         X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
21158       // Convert clamped value to integer.
21159       SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
21160 
21161       // NaN will become INDVAL, with the top bit set and the rest zero.
21162       // Truncation will discard the top bit, resulting in zero.
21163       return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21164     }
21165 
21166     // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
21167     SDValue MinClamped = DAG.getNode(
21168       X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
21169     // Clamp by MaxFloat from above. NaN cannot occur.
21170     SDValue BothClamped = DAG.getNode(
21171       X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
21172     // Convert clamped value to integer.
21173     SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
21174 
21175     if (!IsSigned) {
21176       // In the unsigned case we're done, because we mapped NaN to MinFloat,
21177       // which is zero.
21178       return FpToInt;
21179     }
21180 
21181     // Otherwise, select zero if Src is NaN.
21182     SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21183     return DAG.getSelectCC(
21184       dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
21185   }
21186 
21187   SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
21188   SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
21189 
21190   // Result of direct conversion, which may be selected away.
21191   SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
21192 
21193   if (DstVT != TmpVT) {
21194     // NaN will become INDVAL, with the top bit set and the rest zero.
21195     // Truncation will discard the top bit, resulting in zero.
21196     FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21197   }
21198 
21199   SDValue Select = FpToInt;
21200   // For signed conversions where we saturate to the same size as the
21201   // result type of the fptoi instructions, INDVAL coincides with integer
21202   // minimum, so we don't need to explicitly check it.
21203   if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
21204     // If Src ULT MinFloat, select MinInt. In particular, this also selects
21205     // MinInt if Src is NaN.
21206     Select = DAG.getSelectCC(
21207       dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
21208   }
21209 
21210   // If Src OGT MaxFloat, select MaxInt.
21211   Select = DAG.getSelectCC(
21212     dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
21213 
21214   // In the unsigned case we are done, because we mapped NaN to MinInt, which
21215   // is already zero. The promoted case was already handled above.
21216   if (!IsSigned || DstVT != TmpVT) {
21217     return Select;
21218   }
21219 
21220   // Otherwise, select 0 if Src is NaN.
21221   SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21222   return DAG.getSelectCC(
21223     dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
21224 }
21225 
21226 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
21227   bool IsStrict = Op->isStrictFPOpcode();
21228 
21229   SDLoc DL(Op);
21230   MVT VT = Op.getSimpleValueType();
21231   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21232   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21233   MVT SVT = In.getSimpleValueType();
21234 
21235   // Let f16->f80 get lowered to a libcall, except on Darwin, where we should
21236   // lower it to an fp_extend via f32 (as only f16<>f32 libcalls are available).
21237   if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
21238                           !Subtarget.getTargetTriple().isOSDarwin()))
21239     return SDValue();
21240 
21241   if ((SVT == MVT::v8f16 && Subtarget.hasF16C()) ||
21242       (SVT == MVT::v16f16 && Subtarget.useAVX512Regs()))
21243     return Op;
21244 
21245   if (SVT == MVT::f16) {
21246     if (Subtarget.hasFP16())
21247       return Op;
21248 
21249     if (VT != MVT::f32) {
21250       if (IsStrict)
21251         return DAG.getNode(
21252             ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
21253             {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
21254                                 {MVT::f32, MVT::Other}, {Chain, In})});
21255 
21256       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21257                          DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
21258     }
21259 
21260     if (!Subtarget.hasF16C()) {
21261       if (!Subtarget.getTargetTriple().isOSDarwin())
21262         return SDValue();
21263 
21264       assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
21265 
21266       // Need a libcall, but the ABI for f16 is soft-float on macOS.
21267       TargetLowering::CallLoweringInfo CLI(DAG);
21268       Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21269 
21270       In = DAG.getBitcast(MVT::i16, In);
21271       TargetLowering::ArgListTy Args;
21272       TargetLowering::ArgListEntry Entry;
21273       Entry.Node = In;
21274       Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
21275       Entry.IsSExt = false;
21276       Entry.IsZExt = true;
21277       Args.push_back(Entry);
21278 
21279       SDValue Callee = DAG.getExternalSymbol(
21280           getLibcallName(RTLIB::FPEXT_F16_F32),
21281           getPointerTy(DAG.getDataLayout()));
21282       CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21283           CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
21284           std::move(Args));
21285 
21286       SDValue Res;
21287       std::tie(Res,Chain) = LowerCallTo(CLI);
21288       if (IsStrict)
21289         Res = DAG.getMergeValues({Res, Chain}, DL);
21290 
21291       return Res;
21292     }
21293 
21294     In = DAG.getBitcast(MVT::i16, In);
21295     In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
21296                      getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
21297                      DAG.getIntPtrConstant(0, DL));
21298     SDValue Res;
21299     if (IsStrict) {
21300       Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
21301                         {Chain, In});
21302       Chain = Res.getValue(1);
21303     } else {
21304       Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
21305                         DAG.getTargetConstant(4, DL, MVT::i32));
21306     }
21307     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
21308                       DAG.getIntPtrConstant(0, DL));
21309     if (IsStrict)
21310       return DAG.getMergeValues({Res, Chain}, DL);
21311     return Res;
21312   }
21313 
21314   if (!SVT.isVector())
21315     return Op;
21316 
21317   if (SVT.getVectorElementType() == MVT::bf16) {
21318     // FIXME: Do we need to support strict FP?
21319     assert(!IsStrict && "Strict FP doesn't support BF16");
21320     if (VT.getVectorElementType() == MVT::f64) {
21321       MVT TmpVT = VT.changeVectorElementType(MVT::f32);
21322       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21323                          DAG.getNode(ISD::FP_EXTEND, DL, TmpVT, In));
21324     }
21325     assert(VT.getVectorElementType() == MVT::f32 && "Unexpected fpext");
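    // bf16 is the upper half of an f32, so extending to f32 just places the
    // bf16 bits in the high 16 bits of each 32-bit lane.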
21326     MVT NVT = SVT.changeVectorElementType(MVT::i32);
21327     In = DAG.getBitcast(SVT.changeTypeToInteger(), In);
21328     In = DAG.getNode(ISD::ZERO_EXTEND, DL, NVT, In);
21329     In = DAG.getNode(ISD::SHL, DL, NVT, In, DAG.getConstant(16, DL, NVT));
21330     return DAG.getBitcast(VT, In);
21331   }
21332 
21333   if (SVT.getVectorElementType() == MVT::f16) {
21334     if (Subtarget.hasFP16() && isTypeLegal(SVT))
21335       return Op;
21336     assert(Subtarget.hasF16C() && "Unexpected features!");
21337     if (SVT == MVT::v2f16)
21338       In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
21339                        DAG.getUNDEF(MVT::v2f16));
21340     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
21341                               DAG.getUNDEF(MVT::v4f16));
21342     if (IsStrict)
21343       return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21344                          {Op->getOperand(0), Res});
21345     return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21346   } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
21347     return Op;
21348   }
21349 
21350   assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
21351 
21352   SDValue Res =
21353       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
21354   if (IsStrict)
21355     return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21356                        {Op->getOperand(0), Res});
21357   return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21358 }
21359 
21360 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
21361   bool IsStrict = Op->isStrictFPOpcode();
21362 
21363   SDLoc DL(Op);
21364   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21365   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21366   MVT VT = Op.getSimpleValueType();
21367   MVT SVT = In.getSimpleValueType();
21368 
21369   if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
21370     return SDValue();
21371 
21372   if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
21373       !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
21374     if (!Subtarget.getTargetTriple().isOSDarwin())
21375       return SDValue();
21376 
21377     // We need a libcall, but the ABI for f16 libcalls on macOS is soft-float.
21378     TargetLowering::CallLoweringInfo CLI(DAG);
21379     Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21380 
21381     TargetLowering::ArgListTy Args;
21382     TargetLowering::ArgListEntry Entry;
21383     Entry.Node = In;
21384     Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
21385     Entry.IsSExt = false;
21386     Entry.IsZExt = true;
21387     Args.push_back(Entry);
21388 
21389     SDValue Callee = DAG.getExternalSymbol(
21390         getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
21391                                        : RTLIB::FPROUND_F32_F16),
21392         getPointerTy(DAG.getDataLayout()));
21393     CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21394         CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
21395         std::move(Args));
21396 
21397     SDValue Res;
21398     std::tie(Res, Chain) = LowerCallTo(CLI);
21399 
21400     Res = DAG.getBitcast(MVT::f16, Res);
21401 
21402     if (IsStrict)
21403       Res = DAG.getMergeValues({Res, Chain}, DL);
21404 
21405     return Res;
21406   }
21407 
21408   if (VT.getScalarType() == MVT::bf16) {
21409     if (SVT.getScalarType() == MVT::f32 && isTypeLegal(VT))
21410       return Op;
21411     return SDValue();
21412   }
21413 
21414   if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
21415     if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
21416       return SDValue();
21417 
21418     if (VT.isVector())
21419       return Op;
21420 
21421     SDValue Res;
21422     SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
21423                                         MVT::i32);
21424     if (IsStrict) {
21425       Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
21426                         DAG.getConstantFP(0, DL, MVT::v4f32), In,
21427                         DAG.getIntPtrConstant(0, DL));
21428       Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
21429                         {Chain, Res, Rnd});
21430       Chain = Res.getValue(1);
21431     } else {
21432       // FIXME: Should we use zeros for upper elements for non-strict?
21433       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
21434       Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
21435     }
21436 
21437     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
21438                       DAG.getIntPtrConstant(0, DL));
21439     Res = DAG.getBitcast(MVT::f16, Res);
21440 
21441     if (IsStrict)
21442       return DAG.getMergeValues({Res, Chain}, DL);
21443 
21444     return Res;
21445   }
21446 
21447   return Op;
21448 }
21449 
21450 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
21451   bool IsStrict = Op->isStrictFPOpcode();
21452   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21453   assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
21454          "Unexpected VT!");
21455 
21456   SDLoc dl(Op);
21457   SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
21458                             DAG.getConstant(0, dl, MVT::v8i16), Src,
21459                             DAG.getIntPtrConstant(0, dl));
21460 
21461   SDValue Chain;
21462   if (IsStrict) {
21463     Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
21464                       {Op.getOperand(0), Res});
21465     Chain = Res.getValue(1);
21466   } else {
21467     Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
21468   }
21469 
21470   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
21471                     DAG.getIntPtrConstant(0, dl));
21472 
21473   if (IsStrict)
21474     return DAG.getMergeValues({Res, Chain}, dl);
21475 
21476   return Res;
21477 }
21478 
21479 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
21480   bool IsStrict = Op->isStrictFPOpcode();
21481   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21482   assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
21483          "Unexpected VT!");
21484 
21485   SDLoc dl(Op);
21486   SDValue Res, Chain;
21487   if (IsStrict) {
21488     Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
21489                       DAG.getConstantFP(0, dl, MVT::v4f32), Src,
21490                       DAG.getIntPtrConstant(0, dl));
21491     Res = DAG.getNode(
21492         X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
21493         {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
21494     Chain = Res.getValue(1);
21495   } else {
21496     // FIXME: Should we use zeros for upper elements for non-strict?
21497     Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
21498     Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
21499                       DAG.getTargetConstant(4, dl, MVT::i32));
21500   }
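  // In both paths the immediate 4 sets bit 2 of the CVTPS2PH control byte,
  // i.e. round with the current MXCSR rounding mode rather than a fixed
  // rounding mode taken from the immediate.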
21501 
21502   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
21503                     DAG.getIntPtrConstant(0, dl));
21504 
21505   if (IsStrict)
21506     return DAG.getMergeValues({Res, Chain}, dl);
21507 
21508   return Res;
21509 }
21510 
21511 SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
21512                                            SelectionDAG &DAG) const {
21513   SDLoc DL(Op);
21514   MakeLibCallOptions CallOptions;
21515   RTLIB::Libcall LC =
21516       RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
21517   SDValue Res =
21518       makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
21519   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16,
21520                      DAG.getBitcast(MVT::i32, Res));
21521 }
21522 
21523 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21524 /// vector operation in place of the typical scalar operation.
21525 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
21526                                          const X86Subtarget &Subtarget) {
21527   // If both operands have other uses, this is probably not profitable.
21528   SDValue LHS = Op.getOperand(0);
21529   SDValue RHS = Op.getOperand(1);
21530   if (!LHS.hasOneUse() && !RHS.hasOneUse())
21531     return Op;
21532 
21533   // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
21534   bool IsFP = Op.getSimpleValueType().isFloatingPoint();
21535   if (IsFP && !Subtarget.hasSSE3())
21536     return Op;
21537   if (!IsFP && !Subtarget.hasSSSE3())
21538     return Op;
21539 
21540   // Extract from a common vector.
21541   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21542       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21543       LHS.getOperand(0) != RHS.getOperand(0) ||
21544       !isa<ConstantSDNode>(LHS.getOperand(1)) ||
21545       !isa<ConstantSDNode>(RHS.getOperand(1)) ||
21546       !shouldUseHorizontalOp(true, DAG, Subtarget))
21547     return Op;
21548 
21549   // Allow commuted 'hadd' ops.
21550   // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
21551   unsigned HOpcode;
21552   switch (Op.getOpcode()) {
21553     case ISD::ADD: HOpcode = X86ISD::HADD; break;
21554     case ISD::SUB: HOpcode = X86ISD::HSUB; break;
21555     case ISD::FADD: HOpcode = X86ISD::FHADD; break;
21556     case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
21557     default:
21558       llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
21559   }
21560   unsigned LExtIndex = LHS.getConstantOperandVal(1);
21561   unsigned RExtIndex = RHS.getConstantOperandVal(1);
21562   if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
21563       (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
21564     std::swap(LExtIndex, RExtIndex);
21565 
21566   if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
21567     return Op;
21568 
21569   SDValue X = LHS.getOperand(0);
21570   EVT VecVT = X.getValueType();
21571   unsigned BitWidth = VecVT.getSizeInBits();
21572   unsigned NumLanes = BitWidth / 128;
21573   unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
21574   assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
21575          "Not expecting illegal vector widths here");
21576 
21577   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
21578   // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
21579   SDLoc DL(Op);
21580   if (BitWidth == 256 || BitWidth == 512) {
21581     unsigned LaneIdx = LExtIndex / NumEltsPerLane;
21582     X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
21583     LExtIndex %= NumEltsPerLane;
21584   }
21585 
21586   // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
21587   // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
21588   // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
21589   // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
21590   SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
21591   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
21592                      DAG.getIntPtrConstant(LExtIndex / 2, DL));
21593 }
21594 
21595 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21596 /// vector operation in place of the typical scalar operation.
21597 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
21598   assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
21599          "Only expecting float/double");
21600   return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
21601 }
21602 
21603 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
21604 /// This mode isn't supported in hardware on X86. But as long as we aren't
21605 /// compiling with trapping math, we can emulate this with
21606 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
21607 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
21608   SDValue N0 = Op.getOperand(0);
21609   SDLoc dl(Op);
21610   MVT VT = Op.getSimpleValueType();
21611 
21612   // N0 += copysign(nextafter(0.5, 0.0), N0)
21613   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21614   bool Ignored;
21615   APFloat Point5Pred = APFloat(0.5f);
21616   Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
21617   Point5Pred.next(/*nextDown*/true);
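  // Using the value just below 0.5 keeps inputs slightly under 0.5 from
  // rounding up: e.g. for f32, X = 0.5 - 2^-25 plus an exact 0.5 would round
  // (ties-to-even) to 1.0 and truncate to 1.0, whereas X plus (0.5 - 2^-25)
  // is exactly 1.0 - 2^-24 and truncates to 0.0. A true tie such as X = 2.5
  // still ends up at exactly 3.0 after the add, as FROUND requires.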
21618 
21619   SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
21620                               DAG.getConstantFP(Point5Pred, dl, VT), N0);
21621   N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
21622 
21623   // Truncate the result to remove fraction.
21624   return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
21625 }
21626 
21627 /// The only differences between FABS and FNEG are the mask and the logic op.
21628 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
21629 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
21630   assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
21631          "Wrong opcode for lowering FABS or FNEG.");
21632 
21633   bool IsFABS = (Op.getOpcode() == ISD::FABS);
21634 
21635   // If this is a FABS and it has an FNEG user, bail out to fold the combination
21636   // into an FNABS. We'll lower the FABS after that if it is still in use.
21637   if (IsFABS)
21638     for (SDNode *User : Op->uses())
21639       if (User->getOpcode() == ISD::FNEG)
21640         return Op;
21641 
21642   SDLoc dl(Op);
21643   MVT VT = Op.getSimpleValueType();
21644 
21645   bool IsF128 = (VT == MVT::f128);
21646   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21647          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21648          "Unexpected type in LowerFABSorFNEG");
21649 
21650   // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOptLevel to
21651   // decide if we should generate a 16-byte constant mask when we only need 4 or
21652   // 8 bytes for the scalar case.
21653 
21654   // There are no scalar bitwise logical SSE/AVX instructions, so we
21655   // generate a 16-byte vector constant and logic op even for the scalar case.
21656   // Using a 16-byte mask allows folding the load of the mask with
21657   // the logic op, which can save ~4 bytes of code size.
21658   bool IsFakeVector = !VT.isVector() && !IsF128;
21659   MVT LogicVT = VT;
21660   if (IsFakeVector)
21661     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
21662               : (VT == MVT::f32) ? MVT::v4f32
21663                                  : MVT::v8f16;
21664 
21665   unsigned EltBits = VT.getScalarSizeInBits();
21666   // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
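  // (e.g. for f32: FABS ANDs with 0x7fffffff to clear the sign bit, FNEG
  // XORs with 0x80000000 to flip it, and the FNABS fold below ORs with
  // 0x80000000 to force it set.)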
21667   APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
21668                            APInt::getSignMask(EltBits);
21669   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21670   SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
21671 
21672   SDValue Op0 = Op.getOperand(0);
21673   bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
21674   unsigned LogicOp = IsFABS  ? X86ISD::FAND :
21675                      IsFNABS ? X86ISD::FOR  :
21676                                X86ISD::FXOR;
21677   SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
21678 
21679   if (VT.isVector() || IsF128)
21680     return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21681 
21682   // For the scalar case extend to a 128-bit vector, perform the logic op,
21683   // and extract the scalar result back out.
21684   Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
21685   SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21686   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
21687                      DAG.getIntPtrConstant(0, dl));
21688 }
21689 
21690 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
21691   SDValue Mag = Op.getOperand(0);
21692   SDValue Sign = Op.getOperand(1);
21693   SDLoc dl(Op);
21694 
21695   // If the sign operand is smaller, extend it first.
21696   MVT VT = Op.getSimpleValueType();
21697   if (Sign.getSimpleValueType().bitsLT(VT))
21698     Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
21699 
21700   // And if it is bigger, shrink it first.
21701   if (Sign.getSimpleValueType().bitsGT(VT))
21702     Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
21703                        DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
21704 
21705   // At this point the operands and the result should have the same
21706   // type, and that won't be f80 since that is not custom lowered.
21707   bool IsF128 = (VT == MVT::f128);
21708   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21709          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21710          "Unexpected type in LowerFCOPYSIGN");
21711 
21712   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21713 
21714   // Perform all scalar logic operations as 16-byte vectors because there are no
21715   // scalar FP logic instructions in SSE.
21716   // TODO: This isn't necessary. If we used scalar types, we might avoid some
21717   // unnecessary splats, but we might miss load folding opportunities. Should
21718   // this decision be based on OptimizeForSize?
21719   bool IsFakeVector = !VT.isVector() && !IsF128;
21720   MVT LogicVT = VT;
21721   if (IsFakeVector)
21722     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
21723               : (VT == MVT::f32) ? MVT::v4f32
21724                                  : MVT::v8f16;
21725 
21726   // The mask constants are automatically splatted for vector types.
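  // (e.g. for f64 the sign mask is 0x8000000000000000 and the magnitude mask
  // is 0x7fffffffffffffff in each lane.)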
21727   unsigned EltSizeInBits = VT.getScalarSizeInBits();
21728   SDValue SignMask = DAG.getConstantFP(
21729       APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
21730   SDValue MagMask = DAG.getConstantFP(
21731       APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
21732 
21733   // First, clear all bits but the sign bit from the second operand (sign).
21734   if (IsFakeVector)
21735     Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
21736   SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
21737 
21738   // Next, clear the sign bit from the first operand (magnitude).
21739   // TODO: If we had general constant folding for FP logic ops, this check
21740   // wouldn't be necessary.
21741   SDValue MagBits;
21742   if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
21743     APFloat APF = Op0CN->getValueAPF();
21744     APF.clearSign();
21745     MagBits = DAG.getConstantFP(APF, dl, LogicVT);
21746   } else {
21747     // If the magnitude operand wasn't a constant, we need to AND out the sign.
21748     if (IsFakeVector)
21749       Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
21750     MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
21751   }
21752 
21753   // OR the magnitude value with the sign bit.
21754   SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
21755   return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
21756                                           DAG.getIntPtrConstant(0, dl));
21757 }
21758 
21759 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
21760   SDValue N0 = Op.getOperand(0);
21761   SDLoc dl(Op);
21762   MVT VT = Op.getSimpleValueType();
21763 
21764   MVT OpVT = N0.getSimpleValueType();
21765   assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
21766          "Unexpected type for FGETSIGN");
21767 
21768   // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
21769   MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
21770   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
21771   Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
21772   Res = DAG.getZExtOrTrunc(Res, dl, VT);
21773   Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
21774   return Res;
21775 }
21776 
21777 /// Helper for attempting to create a X86ISD::BT node.
21778 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
21779   // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
21780   // instruction.  Since the shift amount is in-range-or-undefined, we know
21781   // that doing a bittest on the i32 value is ok.  We extend to i32 because
21782   // the encoding for the i16 version is larger than the i32 version.
21783   // Also promote i16 to i32 for performance / code size reason.
21784   // Also promote i16 to i32 for performance / code size reasons.
21785     Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
21786 
21787   // No legal type found, give up.
21788   if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
21789     return SDValue();
21790 
21791   // See if we can use the 32-bit instruction instead of the 64-bit one for a
21792   // shorter encoding. Since the former takes the modulo 32 of BitNo and the
21793   // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
21794   // known to be zero.
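  // e.g. (illustrative sketch) when bit 5 of %bitno is known to be zero:
  //   bt i64 %src, %bitno --> bt i32 (trunc %src), %bitno
  // Both forms then test the same bit of the value.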
21795   if (Src.getValueType() == MVT::i64 &&
21796       DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21797     Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
21798 
21799   // If the operand types disagree, extend the shift amount to match.  Since
21800   // BT ignores high bits (like shifts) we can use anyextend.
21801   if (Src.getValueType() != BitNo.getValueType()) {
21802     // Peek through a mask/modulo operation.
21803     // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
21804     // we probably need a better IsDesirableToPromoteOp to handle this as well.
21805     if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
21806       BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
21807                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21808                                       BitNo.getOperand(0)),
21809                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21810                                       BitNo.getOperand(1)));
21811     else
21812       BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
21813   }
21814 
21815   return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
21816 }
21817 
21818 /// Helper for creating a X86ISD::SETCC node.
21819 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
21820                         SelectionDAG &DAG) {
21821   return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
21822                      DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
21823 }
21824 
21825 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
21826 /// recognizable memcmp expansion.
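/// An illustrative tree shape (as produced by the memcmp expansion) would be:
///   (or (xor A, B), (or (xor C, D), (xor E, F)))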
21827 static bool isOrXorXorTree(SDValue X, bool Root = true) {
21828   if (X.getOpcode() == ISD::OR)
21829     return isOrXorXorTree(X.getOperand(0), false) &&
21830            isOrXorXorTree(X.getOperand(1), false);
21831   if (Root)
21832     return false;
21833   return X.getOpcode() == ISD::XOR;
21834 }
21835 
21836 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
21837 /// expansion.
21838 template <typename F>
21839 static SDValue emitOrXorXorTree(SDValue X, const SDLoc &DL, SelectionDAG &DAG,
21840                                 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
21841   SDValue Op0 = X.getOperand(0);
21842   SDValue Op1 = X.getOperand(1);
21843   if (X.getOpcode() == ISD::OR) {
21844     SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21845     SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21846     if (VecVT != CmpVT)
21847       return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
21848     if (HasPT)
21849       return DAG.getNode(ISD::OR, DL, VecVT, A, B);
21850     return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
21851   }
21852   if (X.getOpcode() == ISD::XOR) {
21853     SDValue A = SToV(Op0);
21854     SDValue B = SToV(Op1);
21855     if (VecVT != CmpVT)
21856       return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
21857     if (HasPT)
21858       return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
21859     return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
21860   }
21861   llvm_unreachable("Impossible");
21862 }
21863 
21864 /// Try to map a 128-bit or larger integer comparison to vector instructions
21865 /// before type legalization splits it up into chunks.
21866 static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
21867                                                ISD::CondCode CC,
21868                                                const SDLoc &DL,
21869                                                SelectionDAG &DAG,
21870                                                const X86Subtarget &Subtarget) {
21871   assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
21872 
21873   // We're looking for an oversized integer equality comparison.
21874   EVT OpVT = X.getValueType();
21875   unsigned OpSize = OpVT.getSizeInBits();
21876   if (!OpVT.isScalarInteger() || OpSize < 128)
21877     return SDValue();
21878 
21879   // Ignore a comparison with zero because that gets special treatment in
21880   // EmitTest(). But make an exception for the special case of a pair of
21881   // logically-combined vector-sized operands compared to zero. This pattern may
21882   // be generated by the memcmp expansion pass with oversized integer compares
21883   // (see PR33325).
21884   bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
21885   if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
21886     return SDValue();
21887 
21888   // Don't perform this combine if constructing the vector will be expensive.
21889   auto IsVectorBitCastCheap = [](SDValue X) {
21890     X = peekThroughBitcasts(X);
21891     return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
21892            X.getOpcode() == ISD::LOAD;
21893   };
21894   if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
21895       !IsOrXorXorTreeCCZero)
21896     return SDValue();
21897 
21898   // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
21899   // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
21900   // Otherwise use PCMPEQ (plus AND) and mask testing.
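  // e.g. an illustrative sketch of the SSE4.1 path for a 128-bit operand:
  //   setcc i128 X, Y, eq --> setcc COND_E (ptest V, V)
  //   where V = xor (bitcast X), (bitcast Y)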
21901   bool NoImplicitFloatOps =
21902       DAG.getMachineFunction().getFunction().hasFnAttribute(
21903           Attribute::NoImplicitFloat);
21904   if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
21905       ((OpSize == 128 && Subtarget.hasSSE2()) ||
21906        (OpSize == 256 && Subtarget.hasAVX()) ||
21907        (OpSize == 512 && Subtarget.useAVX512Regs()))) {
21908     bool HasPT = Subtarget.hasSSE41();
21909 
21910     // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
21911     // vector registers are essentially free. (Technically, widening registers
21912     // prevents load folding, but the tradeoff is worth it.)
21913     bool PreferKOT = Subtarget.preferMaskRegisters();
21914     bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
21915 
21916     EVT VecVT = MVT::v16i8;
21917     EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
21918     if (OpSize == 256) {
21919       VecVT = MVT::v32i8;
21920       CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
21921     }
21922     EVT CastVT = VecVT;
21923     bool NeedsAVX512FCast = false;
21924     if (OpSize == 512 || NeedZExt) {
21925       if (Subtarget.hasBWI()) {
21926         VecVT = MVT::v64i8;
21927         CmpVT = MVT::v64i1;
21928         if (OpSize == 512)
21929           CastVT = VecVT;
21930       } else {
21931         VecVT = MVT::v16i32;
21932         CmpVT = MVT::v16i1;
21933         CastVT = OpSize == 512   ? VecVT
21934                  : OpSize == 256 ? MVT::v8i32
21935                                  : MVT::v4i32;
21936         NeedsAVX512FCast = true;
21937       }
21938     }
21939 
21940     auto ScalarToVector = [&](SDValue X) -> SDValue {
21941       bool TmpZext = false;
21942       EVT TmpCastVT = CastVT;
21943       if (X.getOpcode() == ISD::ZERO_EXTEND) {
21944         SDValue OrigX = X.getOperand(0);
21945         unsigned OrigSize = OrigX.getScalarValueSizeInBits();
21946         if (OrigSize < OpSize) {
21947           if (OrigSize == 128) {
21948             TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
21949             X = OrigX;
21950             TmpZext = true;
21951           } else if (OrigSize == 256) {
21952             TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
21953             X = OrigX;
21954             TmpZext = true;
21955           }
21956         }
21957       }
21958       X = DAG.getBitcast(TmpCastVT, X);
21959       if (!NeedZExt && !TmpZext)
21960         return X;
21961       return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
21962                          DAG.getConstant(0, DL, VecVT), X,
21963                          DAG.getVectorIdxConstant(0, DL));
21964     };
21965 
21966     SDValue Cmp;
21967     if (IsOrXorXorTreeCCZero) {
21968       // This is a bitwise-combined equality comparison of 2 pairs of vectors:
21969       // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
21970       // Use 2 vector equality compares and 'and' the results before doing a
21971       // MOVMSK.
21972       Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
21973     } else {
21974       SDValue VecX = ScalarToVector(X);
21975       SDValue VecY = ScalarToVector(Y);
21976       if (VecVT != CmpVT) {
21977         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
21978       } else if (HasPT) {
21979         Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
21980       } else {
21981         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
21982       }
21983     }
21984     // AVX512 should emit a setcc that will lower to kortest.
21985     if (VecVT != CmpVT) {
21986       EVT KRegVT = CmpVT == MVT::v64i1   ? MVT::i64
21987                    : CmpVT == MVT::v32i1 ? MVT::i32
21988                                          : MVT::i16;
21989       return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
21990                           DAG.getConstant(0, DL, KRegVT), CC);
21991     }
21992     if (HasPT) {
21993       SDValue BCCmp =
21994           DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64, Cmp);
21995       SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
21996       X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
21997       SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
21998       return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
21999     }
22000     // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
22001     // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
22002     // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
22003     assert(Cmp.getValueType() == MVT::v16i8 &&
22004            "Non 128-bit vector on pre-SSE41 target");
22005     SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
22006     SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
22007     return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
22008   }
22009 
22010   return SDValue();
22011 }
22012 
22013 /// Helper for matching BINOP(EXTRACTELT(X,0),BINOP(EXTRACTELT(X,1),...))
22014 /// style scalarized (associative) reduction patterns. Partial reductions
22015 /// are supported when the pointer SrcMask is non-null.
22016 /// TODO - move this to SelectionDAG?
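/// An illustrative match (a fully used v4i32 source X) would be:
///   or(extract(X,0),or(extract(X,1),or(extract(X,2),extract(X,3))))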
22017 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
22018                                  SmallVectorImpl<SDValue> &SrcOps,
22019                                  SmallVectorImpl<APInt> *SrcMask = nullptr) {
22020   SmallVector<SDValue, 8> Opnds;
22021   DenseMap<SDValue, APInt> SrcOpMap;
22022   EVT VT = MVT::Other;
22023 
22024   // Recognize a special case where a vector is cast into a wide integer to
22025   // test for all 0s.
22026   assert(Op.getOpcode() == unsigned(BinOp) &&
22027          "Unexpected bit reduction opcode");
22028   Opnds.push_back(Op.getOperand(0));
22029   Opnds.push_back(Op.getOperand(1));
22030 
22031   for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
22032     SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
22033     // BFS traverse all BinOp operands.
22034     if (I->getOpcode() == unsigned(BinOp)) {
22035       Opnds.push_back(I->getOperand(0));
22036       Opnds.push_back(I->getOperand(1));
22037       // Re-evaluate the number of nodes to be traversed.
22038       e += 2; // 2 more nodes (LHS and RHS) are pushed.
22039       continue;
22040     }
22041 
22042     // Quit if not an EXTRACT_VECTOR_ELT.
22043     if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22044       return false;
22045 
22046     // Quit if there is no constant index.
22047     auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
22048     if (!Idx)
22049       return false;
22050 
22051     SDValue Src = I->getOperand(0);
22052     DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
22053     if (M == SrcOpMap.end()) {
22054       VT = Src.getValueType();
22055       // Quit if not the same type.
22056       if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
22057         return false;
22058       unsigned NumElts = VT.getVectorNumElements();
22059       APInt EltCount = APInt::getZero(NumElts);
22060       M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
22061       SrcOps.push_back(Src);
22062     }
22063 
22064     // Quit if element already used.
22065     unsigned CIdx = Idx->getZExtValue();
22066     if (M->second[CIdx])
22067       return false;
22068     M->second.setBit(CIdx);
22069   }
22070 
22071   if (SrcMask) {
22072     // Collect the source partial masks.
22073     for (SDValue &SrcOp : SrcOps)
22074       SrcMask->push_back(SrcOpMap[SrcOp]);
22075   } else {
22076     // Quit if not all elements are used.
22077     for (const auto &I : SrcOpMap)
22078       if (!I.second.isAllOnes())
22079         return false;
22080   }
22081 
22082   return true;
22083 }
22084 
22085 // Helper function for comparing all bits of two vectors.
22086 static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
22087                                    ISD::CondCode CC, const APInt &OriginalMask,
22088                                    const X86Subtarget &Subtarget,
22089                                    SelectionDAG &DAG, X86::CondCode &X86CC) {
22090   EVT VT = LHS.getValueType();
22091   unsigned ScalarSize = VT.getScalarSizeInBits();
22092   if (OriginalMask.getBitWidth() != ScalarSize) {
22093     assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
22094     return SDValue();
22095   }
22096 
22097   // Quit if not convertible to a legal scalar or 128/256-bit vector.
22098   if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22099     return SDValue();
22100 
22101   // FCMP may use ISD::SETNE when nnan - early out if we manage to get here.
22102   if (VT.isFloatingPoint())
22103     return SDValue();
22104 
22105   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22106   X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
22107 
22108   APInt Mask = OriginalMask;
22109 
22110   auto MaskBits = [&](SDValue Src) {
22111     if (Mask.isAllOnes())
22112       return Src;
22113     EVT SrcVT = Src.getValueType();
22114     SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
22115     return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
22116   };
22117 
22118   // For sub-128-bit vector, cast to (legal) integer and compare with zero.
22119   if (VT.getSizeInBits() < 128) {
22120     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
22121     if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT)) {
22122       if (IntVT != MVT::i64)
22123         return SDValue();
22124       auto SplitLHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(LHS)), DL,
22125                                       MVT::i32, MVT::i32);
22126       auto SplitRHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(RHS)), DL,
22127                                       MVT::i32, MVT::i32);
22128       SDValue Lo =
22129           DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.first, SplitRHS.first);
22130       SDValue Hi =
22131           DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.second, SplitRHS.second);
22132       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22133                          DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi),
22134                          DAG.getConstant(0, DL, MVT::i32));
22135     }
22136     return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22137                        DAG.getBitcast(IntVT, MaskBits(LHS)),
22138                        DAG.getBitcast(IntVT, MaskBits(RHS)));
22139   }
22140 
22141   // Without PTEST, a masked v2i64 or-reduction is not faster than
22142   // scalarization.
22143   bool UseKORTEST = Subtarget.useAVX512Regs();
22144   bool UsePTEST = Subtarget.hasSSE41();
22145   if (!UsePTEST && !Mask.isAllOnes() && ScalarSize > 32)
22146     return SDValue();
22147 
22148   // Split down to 128/256/512-bit vector.
22149   unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);
22150 
22151   // If the input vector has vector elements wider than the target test size,
22152   // then cast to <X x i64> so it will safely split.
22153   if (ScalarSize > TestSize) {
22154     if (!Mask.isAllOnes())
22155       return SDValue();
22156     VT = EVT::getVectorVT(*DAG.getContext(), MVT::i64, VT.getSizeInBits() / 64);
22157     LHS = DAG.getBitcast(VT, LHS);
22158     RHS = DAG.getBitcast(VT, RHS);
22159     Mask = APInt::getAllOnes(64);
22160   }
22161 
22162   if (VT.getSizeInBits() > TestSize) {
22163     KnownBits KnownRHS = DAG.computeKnownBits(RHS);
22164     if (KnownRHS.isConstant() && KnownRHS.getConstant() == Mask) {
22165       // If ICMP(AND(LHS,MASK),MASK) - reduce using AND splits.
22166       while (VT.getSizeInBits() > TestSize) {
22167         auto Split = DAG.SplitVector(LHS, DL);
22168         VT = Split.first.getValueType();
22169         LHS = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22170       }
22171       RHS = DAG.getAllOnesConstant(DL, VT);
22172     } else if (!UsePTEST && !KnownRHS.isZero()) {
22173       // MOVMSK Special Case:
22174       // ALLOF(CMPEQ(X,Y)) -> AND(CMPEQ(X[0],Y[0]),CMPEQ(X[1],Y[1]),....)
22175       MVT SVT = ScalarSize >= 32 ? MVT::i32 : MVT::i8;
22176       VT = MVT::getVectorVT(SVT, VT.getSizeInBits() / SVT.getSizeInBits());
22177       LHS = DAG.getBitcast(VT, MaskBits(LHS));
22178       RHS = DAG.getBitcast(VT, MaskBits(RHS));
22179       EVT BoolVT = VT.changeVectorElementType(MVT::i1);
22180       SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETEQ);
22181       V = DAG.getSExtOrTrunc(V, DL, VT);
22182       while (VT.getSizeInBits() > TestSize) {
22183         auto Split = DAG.SplitVector(V, DL);
22184         VT = Split.first.getValueType();
22185         V = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22186       }
22187       V = DAG.getNOT(DL, V, VT);
22188       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22189       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22190                          DAG.getConstant(0, DL, MVT::i32));
22191     } else {
22192       // Convert to an ICMP_EQ(XOR(LHS,RHS),0) pattern.
22193       SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
22194       while (VT.getSizeInBits() > TestSize) {
22195         auto Split = DAG.SplitVector(V, DL);
22196         VT = Split.first.getValueType();
22197         V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
22198       }
22199       LHS = V;
22200       RHS = DAG.getConstant(0, DL, VT);
22201     }
22202   }
22203 
22204   if (UseKORTEST && VT.is512BitVector()) {
22205     MVT TestVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
22206     MVT BoolVT = TestVT.changeVectorElementType(MVT::i1);
22207     LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22208     RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22209     SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETNE);
22210     return DAG.getNode(X86ISD::KORTEST, DL, MVT::i32, V, V);
22211   }
22212 
22213   if (UsePTEST) {
22214     MVT TestVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
22215     LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22216     RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22217     SDValue V = DAG.getNode(ISD::XOR, DL, TestVT, LHS, RHS);
22218     return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
22219   }
22220 
22221   assert(VT.getSizeInBits() == 128 && "Failure to split to 128-bits");
22222   MVT MaskVT = ScalarSize >= 32 ? MVT::v4i32 : MVT::v16i8;
22223   LHS = DAG.getBitcast(MaskVT, MaskBits(LHS));
22224   RHS = DAG.getBitcast(MaskVT, MaskBits(RHS));
22225   SDValue V = DAG.getNode(X86ISD::PCMPEQ, DL, MaskVT, LHS, RHS);
22226   V = DAG.getNOT(DL, V, MaskVT);
22227   V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22228   return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22229                      DAG.getConstant(0, DL, MVT::i32));
22230 }
22231 
22232 // Check whether an AND/OR'd reduction tree is PTEST-able, or if we can fall
22233 // back to CMP(MOVMSK(PCMPEQB(X,Y))).
22234 static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
22235                                        ISD::CondCode CC, const SDLoc &DL,
22236                                        const X86Subtarget &Subtarget,
22237                                        SelectionDAG &DAG,
22238                                        X86::CondCode &X86CC) {
22239   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22240 
22241   bool CmpNull = isNullConstant(RHS);
22242   bool CmpAllOnes = isAllOnesConstant(RHS);
22243   if (!CmpNull && !CmpAllOnes)
22244     return SDValue();
22245 
22246   SDValue Op = LHS;
22247   if (!Subtarget.hasSSE2() || !Op->hasOneUse())
22248     return SDValue();
22249 
22250   // Check whether we're masking/truncating an OR-reduction result, in which
22251   // case track the masked bits.
22252   // TODO: Add CmpAllOnes support.
22253   APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
22254   if (CmpNull) {
22255     switch (Op.getOpcode()) {
22256     case ISD::TRUNCATE: {
22257       SDValue Src = Op.getOperand(0);
22258       Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
22259                                   Op.getScalarValueSizeInBits());
22260       Op = Src;
22261       break;
22262     }
22263     case ISD::AND: {
22264       if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
22265         Mask = Cst->getAPIntValue();
22266         Op = Op.getOperand(0);
22267       }
22268       break;
22269     }
22270     }
22271   }
22272 
22273   ISD::NodeType LogicOp = CmpNull ? ISD::OR : ISD::AND;
22274 
22275   // Match icmp(or(extract(X,0),extract(X,1)),0) anyof reduction patterns.
22276   // Match icmp(and(extract(X,0),extract(X,1)),-1) allof reduction patterns.
22277   SmallVector<SDValue, 8> VecIns;
22278   if (Op.getOpcode() == LogicOp && matchScalarReduction(Op, LogicOp, VecIns)) {
22279     EVT VT = VecIns[0].getValueType();
22280     assert(llvm::all_of(VecIns,
22281                         [VT](SDValue V) { return VT == V.getValueType(); }) &&
22282            "Reduction source vector mismatch");
22283 
22284     // Quit if not splittable to scalar/128/256/512-bit vector.
22285     if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22286       return SDValue();
22287 
22288     // If more than one full vector is evaluated, AND/OR them first before
22289     // PTEST.
22290     for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
22291          Slot += 2, e += 1) {
22292       // Each iteration will AND/OR 2 nodes and append the result until there is
22293       // only 1 node left, i.e. the final value of all vectors.
22294       SDValue LHS = VecIns[Slot];
22295       SDValue RHS = VecIns[Slot + 1];
22296       VecIns.push_back(DAG.getNode(LogicOp, DL, VT, LHS, RHS));
22297     }
22298 
22299     return LowerVectorAllEqual(DL, VecIns.back(),
22300                                CmpNull ? DAG.getConstant(0, DL, VT)
22301                                        : DAG.getAllOnesConstant(DL, VT),
22302                                CC, Mask, Subtarget, DAG, X86CC);
22303   }
22304 
22305   // Match icmp(reduce_or(X),0) anyof reduction patterns.
22306   // Match icmp(reduce_and(X),-1) allof reduction patterns.
22307   if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22308     ISD::NodeType BinOp;
22309     if (SDValue Match =
22310             DAG.matchBinOpReduction(Op.getNode(), BinOp, {LogicOp})) {
22311       EVT MatchVT = Match.getValueType();
22312       return LowerVectorAllEqual(DL, Match,
22313                                  CmpNull ? DAG.getConstant(0, DL, MatchVT)
22314                                          : DAG.getAllOnesConstant(DL, MatchVT),
22315                                  CC, Mask, Subtarget, DAG, X86CC);
22316     }
22317   }
22318 
22319   if (Mask.isAllOnes()) {
22320     assert(!Op.getValueType().isVector() &&
22321            "Illegal vector type for reduction pattern");
22322     SDValue Src = peekThroughBitcasts(Op);
22323     if (Src.getValueType().isFixedLengthVector() &&
22324         Src.getValueType().getScalarType() == MVT::i1) {
22325       // Match icmp(bitcast(icmp_ne(X,Y)),0) reduction patterns.
22326       // Match icmp(bitcast(icmp_eq(X,Y)),-1) reduction patterns.
22327       if (Src.getOpcode() == ISD::SETCC) {
22328         SDValue LHS = Src.getOperand(0);
22329         SDValue RHS = Src.getOperand(1);
22330         EVT LHSVT = LHS.getValueType();
22331         ISD::CondCode SrcCC = cast<CondCodeSDNode>(Src.getOperand(2))->get();
22332         if (SrcCC == (CmpNull ? ISD::SETNE : ISD::SETEQ) &&
22333             llvm::has_single_bit<uint32_t>(LHSVT.getSizeInBits())) {
22334           APInt SrcMask = APInt::getAllOnes(LHSVT.getScalarSizeInBits());
22335           return LowerVectorAllEqual(DL, LHS, RHS, CC, SrcMask, Subtarget, DAG,
22336                                      X86CC);
22337         }
22338       }
22339       // Match icmp(bitcast(vXi1 trunc(Y)),0) reduction patterns.
22340       // Match icmp(bitcast(vXi1 trunc(Y)),-1) reduction patterns.
22341       // Peek through truncation, mask the LSB and compare against zero/LSB.
22342       if (Src.getOpcode() == ISD::TRUNCATE) {
22343         SDValue Inner = Src.getOperand(0);
22344         EVT InnerVT = Inner.getValueType();
22345         if (llvm::has_single_bit<uint32_t>(InnerVT.getSizeInBits())) {
22346           unsigned BW = InnerVT.getScalarSizeInBits();
22347           APInt SrcMask = APInt(BW, 1);
22348           APInt Cmp = CmpNull ? APInt::getZero(BW) : SrcMask;
22349           return LowerVectorAllEqual(DL, Inner,
22350                                      DAG.getConstant(Cmp, DL, InnerVT), CC,
22351                                      SrcMask, Subtarget, DAG, X86CC);
22352         }
22353       }
22354     }
22355   }
22356 
22357   return SDValue();
22358 }
22359 
22360 /// Return true if \c Op has a use that doesn't just read flags.
22361 static bool hasNonFlagsUse(SDValue Op) {
22362   for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
22363        ++UI) {
22364     SDNode *User = *UI;
22365     unsigned UOpNo = UI.getOperandNo();
22366     if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
22367       // Look past the truncate.
22368       UOpNo = User->use_begin().getOperandNo();
22369       User = *User->use_begin();
22370     }
22371 
22372     if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
22373         !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
22374       return true;
22375   }
22376   return false;
22377 }
22378 
22379 // Transform to an x86-specific ALU node with flags if there is a chance of
22380 // using an RMW op or only the flags are used. Otherwise, leave
22381 // the node alone and emit a 'cmp' or 'test' instruction.
22382 static bool isProfitableToUseFlagOp(SDValue Op) {
22383   for (SDNode *U : Op->uses())
22384     if (U->getOpcode() != ISD::CopyToReg &&
22385         U->getOpcode() != ISD::SETCC &&
22386         U->getOpcode() != ISD::STORE)
22387       return false;
22388 
22389   return true;
22390 }
22391 
22392 /// Emit nodes that will be selected as "test Op0,Op0", or something
22393 /// equivalent.
22394 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
22395                         SelectionDAG &DAG, const X86Subtarget &Subtarget) {
22396   // CF and OF aren't always set the way we want. Determine which
22397   // of these we need.
22398   bool NeedCF = false;
22399   bool NeedOF = false;
22400   switch (X86CC) {
22401   default: break;
22402   case X86::COND_A: case X86::COND_AE:
22403   case X86::COND_B: case X86::COND_BE:
22404     NeedCF = true;
22405     break;
22406   case X86::COND_G: case X86::COND_GE:
22407   case X86::COND_L: case X86::COND_LE:
22408   case X86::COND_O: case X86::COND_NO: {
22409     // Check if we really need to set the
22410     // Overflow flag. If NoSignedWrap is present
22411     // that is not actually needed.
22412     switch (Op->getOpcode()) {
22413     case ISD::ADD:
22414     case ISD::SUB:
22415     case ISD::MUL:
22416     case ISD::SHL:
22417       if (Op.getNode()->getFlags().hasNoSignedWrap())
22418         break;
22419       [[fallthrough]];
22420     default:
22421       NeedOF = true;
22422       break;
22423     }
22424     break;
22425   }
22426   }
22427   // See if we can use the EFLAGS value from the operand instead of
22428   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
22429   // we prove that the arithmetic won't overflow, we can't use OF or CF.
22430   if (Op.getResNo() != 0 || NeedOF || NeedCF) {
22431     // Emit a CMP with 0, which is the TEST pattern.
22432     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22433                        DAG.getConstant(0, dl, Op.getValueType()));
22434   }
22435   unsigned Opcode = 0;
22436   unsigned NumOperands = 0;
22437 
22438   SDValue ArithOp = Op;
22439 
22440   // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
22441   // which may be the result of a CAST.  We use the variable 'Op', which is the
22442   // non-casted variable when we check for possible users.
22443   switch (ArithOp.getOpcode()) {
22444   case ISD::AND:
22445     // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
22446     // because a TEST instruction will be better.
22447     if (!hasNonFlagsUse(Op))
22448       break;
22449 
22450     [[fallthrough]];
22451   case ISD::ADD:
22452   case ISD::SUB:
22453   case ISD::OR:
22454   case ISD::XOR:
22455     if (!isProfitableToUseFlagOp(Op))
22456       break;
22457 
22458     // Otherwise use a regular EFLAGS-setting instruction.
22459     switch (ArithOp.getOpcode()) {
22460     default: llvm_unreachable("unexpected operator!");
22461     case ISD::ADD: Opcode = X86ISD::ADD; break;
22462     case ISD::SUB: Opcode = X86ISD::SUB; break;
22463     case ISD::XOR: Opcode = X86ISD::XOR; break;
22464     case ISD::AND: Opcode = X86ISD::AND; break;
22465     case ISD::OR:  Opcode = X86ISD::OR;  break;
22466     }
22467 
22468     NumOperands = 2;
22469     break;
22470   case X86ISD::ADD:
22471   case X86ISD::SUB:
22472   case X86ISD::OR:
22473   case X86ISD::XOR:
22474   case X86ISD::AND:
22475     return SDValue(Op.getNode(), 1);
22476   case ISD::SSUBO:
22477   case ISD::USUBO: {
22478     // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
22479     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22480     return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
22481                        Op->getOperand(1)).getValue(1);
22482   }
22483   default:
22484     break;
22485   }
22486 
22487   if (Opcode == 0) {
22488     // Emit a CMP with 0, which is the TEST pattern.
22489     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22490                        DAG.getConstant(0, dl, Op.getValueType()));
22491   }
22492   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22493   SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
22494 
22495   SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
22496   DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
22497   return SDValue(New.getNode(), 1);
22498 }
22499 
22500 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
22501 /// equivalent.
22502 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
22503                        const SDLoc &dl, SelectionDAG &DAG,
22504                        const X86Subtarget &Subtarget) {
22505   if (isNullConstant(Op1))
22506     return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
22507 
22508   EVT CmpVT = Op0.getValueType();
22509 
22510   assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
22511           CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
22512 
22513   // Only promote the compare up to i32 if it is a 16-bit operation
22514   // with an immediate. 16-bit immediates are to be avoided.
22515   if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
22516       !DAG.getMachineFunction().getFunction().hasMinSize()) {
22517     ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
22518     ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
22519     // Don't do this if the immediate can fit in 8-bits.
22520     if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
22521         (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
22522       unsigned ExtendOp =
22523           isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
22524       if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
22525         // For equality comparisons try to use SIGN_EXTEND if the input was
22526         // truncate from something with enough sign bits.
22527         if (Op0.getOpcode() == ISD::TRUNCATE) {
22528           if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
22529             ExtendOp = ISD::SIGN_EXTEND;
22530         } else if (Op1.getOpcode() == ISD::TRUNCATE) {
22531           if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
22532             ExtendOp = ISD::SIGN_EXTEND;
22533         }
22534       }
22535 
22536       CmpVT = MVT::i32;
22537       Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
22538       Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
22539     }
22540   }
22541 
22542   // Try to shrink i64 compares if the input has enough zero bits.
22543   // FIXME: Do this for non-constant compares for constant on LHS?
22544   if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
22545       Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
22546       cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
22547       DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
22548     CmpVT = MVT::i32;
22549     Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
22550     Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
22551   }
22552 
22553   // 0-x == y --> x+y == 0
22554   // 0-x != y --> x+y != 0
22555   if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
22556       Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22557     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22558     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
22559     return Add.getValue(1);
22560   }
22561 
22562   // x == 0-y --> x+y == 0
22563   // x != 0-y --> x+y != 0
22564   if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
22565       Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22566     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22567     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
22568     return Add.getValue(1);
22569   }
22570 
22571   // Use SUB instead of CMP to enable CSE between SUB and CMP.
22572   SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22573   SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
22574   return Sub.getValue(1);
22575 }
22576 
22577 bool X86TargetLowering::isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
22578                                                           EVT VT) const {
22579   return !VT.isVector() || Cond != ISD::CondCode::SETEQ;
22580 }
22581 
22582 bool X86TargetLowering::optimizeFMulOrFDivAsShiftAddBitcast(
22583     SDNode *N, SDValue, SDValue IntPow2) const {
22584   if (N->getOpcode() == ISD::FDIV)
22585     return true;
22586 
22587   EVT FPVT = N->getValueType(0);
22588   EVT IntVT = IntPow2.getValueType();
22589 
22590   // This indicates a non-free bitcast.
22591   // TODO: This is probably overly conservative as we will need to scale the
22592   // integer vector anyways for the int->fp cast.
22593   if (FPVT.isVector() &&
22594       FPVT.getScalarSizeInBits() != IntVT.getScalarSizeInBits())
22595     return false;
22596 
22597   return true;
22598 }
22599 
22600 /// Check if replacement of SQRT with RSQRT should be disabled.
22601 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
22602   EVT VT = Op.getValueType();
22603 
22604   // We don't need to replace SQRT with RSQRT for half type.
22605   if (VT.getScalarType() == MVT::f16)
22606     return true;
22607 
22608   // We never want to use both SQRT and RSQRT instructions for the same input.
22609   if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
22610     return false;
22611 
22612   if (VT.isVector())
22613     return Subtarget.hasFastVectorFSQRT();
22614   return Subtarget.hasFastScalarFSQRT();
22615 }
22616 
22617 /// The minimum architected relative accuracy is 2^-12. We need one
22618 /// Newton-Raphson step to have a good float result (24 bits of precision).
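/// For reference, one standard Newton-Raphson step for rsqrt refines an
/// estimate E of 1/sqrt(X) as: E' = E * (1.5 - 0.5 * X * E * E).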
22619 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
22620                                            SelectionDAG &DAG, int Enabled,
22621                                            int &RefinementSteps,
22622                                            bool &UseOneConstNR,
22623                                            bool Reciprocal) const {
22624   SDLoc DL(Op);
22625   EVT VT = Op.getValueType();
22626 
22627   // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
22628   // It is likely not profitable to do this for f64 because a double-precision
22629   // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
22630   // instructions: convert to single, rsqrtss, convert back to double, refine
22631   // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
22632   // along with FMA, this could be a throughput win.
22633   // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
22634   // after legalize types.
22635   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22636       (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
22637       (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
22638       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22639       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22640     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22641       RefinementSteps = 1;
22642 
22643     UseOneConstNR = false;
22644     // There is no FRSQRT for 512-bits, but there is RSQRT14.
22645     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
22646     SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
22647     if (RefinementSteps == 0 && !Reciprocal)
22648       Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
22649     return Estimate;
22650   }
22651 
22652   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22653       Subtarget.hasFP16()) {
22654     assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
22655     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22656       RefinementSteps = 0;
22657 
22658     if (VT == MVT::f16) {
22659       SDValue Zero = DAG.getIntPtrConstant(0, DL);
22660       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22661       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22662       Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
22663       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22664     }
22665 
22666     return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
22667   }
22668   return SDValue();
22669 }
22670 
22671 /// The minimum architected relative accuracy is 2^-12. We need one
22672 /// Newton-Raphson step to have a good float result (24 bits of precision).
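/// For reference, one standard Newton-Raphson step for the reciprocal refines
/// an estimate E of 1/X as: E' = E * (2.0 - X * E).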
22673 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
22674                                             int Enabled,
22675                                             int &RefinementSteps) const {
22676   SDLoc DL(Op);
22677   EVT VT = Op.getValueType();
22678 
22679   // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
22680   // It is likely not profitable to do this for f64 because a double-precision
22681   // reciprocal estimate with refinement on x86 prior to FMA requires
22682   // 15 instructions: convert to single, rcpss, convert back to double, refine
22683   // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
22684   // along with FMA, this could be a throughput win.
22685 
22686   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22687       (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
22688       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22689       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22690     // Enable estimate codegen with 1 refinement step for vector division.
22691     // Scalar division estimates are disabled because they break too much
22692     // real-world code. These defaults are intended to match GCC behavior.
22693     if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
22694       return SDValue();
22695 
22696     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22697       RefinementSteps = 1;
22698 
22699     // There is no FRCP for 512-bits, but there is RCP14.
22700     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
22701     return DAG.getNode(Opcode, DL, VT, Op);
22702   }
22703 
22704   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22705       Subtarget.hasFP16()) {
22706     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22707       RefinementSteps = 0;
22708 
22709     if (VT == MVT::f16) {
22710       SDValue Zero = DAG.getIntPtrConstant(0, DL);
22711       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22712       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22713       Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
22714       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22715     }
22716 
22717     return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
22718   }
22719   return SDValue();
22720 }
22721 
22722 /// If we have at least two divisions that use the same divisor, convert to
22723 /// multiplication by a reciprocal. This may need to be adjusted for a given
22724 /// CPU if a division's cost is not at least twice the cost of a multiplication.
22725 /// This is because we still need one division to calculate the reciprocal and
22726 /// then we need two multiplies by that reciprocal as replacements for the
22727 /// original divisions.
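/// e.g. (illustrative): A/D and B/D become R = 1.0/D; A*R; B*R -- one divide
/// plus two multiplies instead of two divides.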
22728 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
22729   return 2;
22730 }
22731 
22732 SDValue
22733 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
22734                                  SelectionDAG &DAG,
22735                                  SmallVectorImpl<SDNode *> &Created) const {
22736   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
22737   if (isIntDivCheap(N->getValueType(0), Attr))
22738     return SDValue(N,0); // Lower SDIV as SDIV
22739 
22740   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
22741          "Unexpected divisor!");
22742 
22743   // Only perform this transform if CMOV is supported otherwise the select
22744   // below will become a branch.
22745   if (!Subtarget.canUseCMOV())
22746     return SDValue();
22747 
22748   // fold (sdiv X, pow2)
22749   EVT VT = N->getValueType(0);
22750   // FIXME: Support i8.
22751   if (VT != MVT::i16 && VT != MVT::i32 &&
22752       !(Subtarget.is64Bit() && VT == MVT::i64))
22753     return SDValue();
22754 
22755   // If the divisor is 2 or -2, the default expansion is better.
22756   if (Divisor == 2 ||
22757       Divisor == APInt(Divisor.getBitWidth(), -2, /*isSigned*/ true))
22758     return SDValue();
22759 
22760   return TargetLowering::buildSDIVPow2WithCMov(N, Divisor, DAG, Created);
22761 }
22762 
22763 /// Result of 'and' is compared against zero. Change to a BT node if possible.
22764 /// Returns the BT node and the condition code needed to use it.
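/// Illustrative patterns handled below (all compared against zero):
///   (and X, (shl 1, N)) and (and (srl X, N), 1) both test bit N of X and can
///   become (X86ISD::BT X, N).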
22765 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
22766                             SelectionDAG &DAG, X86::CondCode &X86CC) {
22767   assert(And.getOpcode() == ISD::AND && "Expected AND node!");
22768   SDValue Op0 = And.getOperand(0);
22769   SDValue Op1 = And.getOperand(1);
22770   if (Op0.getOpcode() == ISD::TRUNCATE)
22771     Op0 = Op0.getOperand(0);
22772   if (Op1.getOpcode() == ISD::TRUNCATE)
22773     Op1 = Op1.getOperand(0);
22774 
22775   SDValue Src, BitNo;
22776   if (Op1.getOpcode() == ISD::SHL)
22777     std::swap(Op0, Op1);
22778   if (Op0.getOpcode() == ISD::SHL) {
22779     if (isOneConstant(Op0.getOperand(0))) {
22780       // If we looked past a truncate, check that it's only truncating away
22781       // known zeros.
22782       unsigned BitWidth = Op0.getValueSizeInBits();
22783       unsigned AndBitWidth = And.getValueSizeInBits();
22784       if (BitWidth > AndBitWidth) {
22785         KnownBits Known = DAG.computeKnownBits(Op0);
22786         if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
22787           return SDValue();
22788       }
22789       Src = Op1;
22790       BitNo = Op0.getOperand(1);
22791     }
22792   } else if (Op1.getOpcode() == ISD::Constant) {
22793     ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
22794     uint64_t AndRHSVal = AndRHS->getZExtValue();
22795     SDValue AndLHS = Op0;
22796 
22797     if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
22798       Src = AndLHS.getOperand(0);
22799       BitNo = AndLHS.getOperand(1);
22800     } else {
22801       // Use BT if the immediate can't be encoded in a TEST instruction or we
22802       // are optimizing for size and the immediate won't fit in a byte.
22803       bool OptForSize = DAG.shouldOptForSize();
22804       if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
22805           isPowerOf2_64(AndRHSVal)) {
22806         Src = AndLHS;
22807         BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
22808                                 Src.getValueType());
22809       }
22810     }
22811   }
22812 
22813   // No patterns found, give up.
22814   if (!Src.getNode())
22815     return SDValue();
22816 
22817   // Remove any bit flip.
22818   if (isBitwiseNot(Src)) {
22819     Src = Src.getOperand(0);
22820     CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
22821   }
22822 
22823   // Attempt to create the X86ISD::BT node.
22824   if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
22825     X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
22826     return BT;
22827   }
22828 
22829   return SDValue();
22830 }
22831 
22832 // Check if pre-AVX condcode can be performed by a single FCMP op.
22833 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
22834   return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
22835 }
22836 
22837 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
22838 /// CMPs.
22839 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
22840                                    SDValue &Op1, bool &IsAlwaysSignaling) {
22841   unsigned SSECC;
22842   bool Swap = false;
22843 
22844   // SSE Condition code mapping:
22845   //  0 - EQ
22846   //  1 - LT
22847   //  2 - LE
22848   //  3 - UNORD
22849   //  4 - NEQ
22850   //  5 - NLT
22851   //  6 - NLE
22852   //  7 - ORD
22853   switch (SetCCOpcode) {
22854   default: llvm_unreachable("Unexpected SETCC condition");
22855   case ISD::SETOEQ:
22856   case ISD::SETEQ:  SSECC = 0; break;
22857   case ISD::SETOGT:
22858   case ISD::SETGT:  Swap = true; [[fallthrough]];
22859   case ISD::SETLT:
22860   case ISD::SETOLT: SSECC = 1; break;
22861   case ISD::SETOGE:
22862   case ISD::SETGE:  Swap = true; [[fallthrough]];
22863   case ISD::SETLE:
22864   case ISD::SETOLE: SSECC = 2; break;
22865   case ISD::SETUO:  SSECC = 3; break;
22866   case ISD::SETUNE:
22867   case ISD::SETNE:  SSECC = 4; break;
22868   case ISD::SETULE: Swap = true; [[fallthrough]];
22869   case ISD::SETUGE: SSECC = 5; break;
22870   case ISD::SETULT: Swap = true; [[fallthrough]];
22871   case ISD::SETUGT: SSECC = 6; break;
22872   case ISD::SETO:   SSECC = 7; break;
22873   case ISD::SETUEQ: SSECC = 8; break;
22874   case ISD::SETONE: SSECC = 12; break;
22875   }
22876   if (Swap)
22877     std::swap(Op0, Op1);
22878 
22879   switch (SetCCOpcode) {
22880   default:
22881     IsAlwaysSignaling = true;
22882     break;
22883   case ISD::SETEQ:
22884   case ISD::SETOEQ:
22885   case ISD::SETUEQ:
22886   case ISD::SETNE:
22887   case ISD::SETONE:
22888   case ISD::SETUNE:
22889   case ISD::SETO:
22890   case ISD::SETUO:
22891     IsAlwaysSignaling = false;
22892     break;
22893   }
22894 
22895   return SSECC;
22896 }
22897 
22898 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
22899 /// concatenate the result back.
22900 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
22901                               ISD::CondCode Cond, SelectionDAG &DAG,
22902                               const SDLoc &dl) {
22903   assert(VT.isInteger() && VT == LHS.getValueType() &&
22904          VT == RHS.getValueType() && "Unsupported VTs!");
22905 
22906   SDValue CC = DAG.getCondCode(Cond);
22907 
22908   // Extract the LHS Lo/Hi vectors
22909   SDValue LHS1, LHS2;
22910   std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
22911 
22912   // Extract the RHS Lo/Hi vectors
22913   SDValue RHS1, RHS2;
22914   std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
22915 
22916   // Issue the operation on the smaller types and concatenate the result back
22917   EVT LoVT, HiVT;
22918   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22919   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
22920                      DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
22921                      DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
22922 }
22923 
22924 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
22925 
22926   SDValue Op0 = Op.getOperand(0);
22927   SDValue Op1 = Op.getOperand(1);
22928   SDValue CC = Op.getOperand(2);
22929   MVT VT = Op.getSimpleValueType();
22930   SDLoc dl(Op);
22931 
22932   assert(VT.getVectorElementType() == MVT::i1 &&
22933          "Cannot set masked compare for this operation");
22934 
22935   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
22936 
22937   // Prefer SETGT over SETLT.
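  // e.g. (illustrative): setcc X, Y, setlt --> setcc Y, X, setgt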
22938   if (SetCCOpcode == ISD::SETLT) {
22939     SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
22940     std::swap(Op0, Op1);
22941   }
22942 
22943   return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
22944 }
22945 
22946 /// Given a buildvector constant, return a new vector constant with each element
22947 /// incremented or decremented. If incrementing or decrementing would result in
22948 /// unsigned overflow or underflow or this is not a simple vector constant,
22949 /// return an empty value.
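/// e.g. (illustrative, incrementing): <i32 1, i32 2, i32 3, i32 4> becomes
/// <i32 2, i32 3, i32 4, i32 5>; a vector containing UINT32_MAX would instead
/// return an empty SDValue.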
22950 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc,
22951                                     bool NSW) {
22952   auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
22953   if (!BV || !V.getValueType().isSimple())
22954     return SDValue();
22955 
22956   MVT VT = V.getSimpleValueType();
22957   MVT EltVT = VT.getVectorElementType();
22958   unsigned NumElts = VT.getVectorNumElements();
22959   SmallVector<SDValue, 8> NewVecC;
22960   SDLoc DL(V);
22961   for (unsigned i = 0; i < NumElts; ++i) {
22962     auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
22963     if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
22964       return SDValue();
22965 
22966     // Avoid overflow/underflow.
22967     const APInt &EltC = Elt->getAPIntValue();
22968     if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
22969       return SDValue();
22970     if (NSW && ((IsInc && EltC.isMaxSignedValue()) ||
22971                 (!IsInc && EltC.isMinSignedValue())))
22972       return SDValue();
22973 
22974     NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
22975   }
22976 
22977   return DAG.getBuildVector(VT, DL, NewVecC);
22978 }
22979 
22980 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
22981 /// Op0 u<= Op1:
22982 ///   t = psubus Op0, Op1
22983 ///   pcmpeq t, <0..0>
22984 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
22985                                     ISD::CondCode Cond, const SDLoc &dl,
22986                                     const X86Subtarget &Subtarget,
22987                                     SelectionDAG &DAG) {
22988   if (!Subtarget.hasSSE2())
22989     return SDValue();
22990 
22991   MVT VET = VT.getVectorElementType();
22992   if (VET != MVT::i8 && VET != MVT::i16)
22993     return SDValue();
22994 
22995   switch (Cond) {
22996   default:
22997     return SDValue();
22998   case ISD::SETULT: {
22999     // If the comparison is against a constant we can turn this into a
23000     // setule.  With psubus, setule does not require a swap.  This is
23001     // beneficial because the constant in the register is no longer
23002     // destructed as the destination so it can be hoisted out of a loop.
23003     // clobbered as the destination, so it can be hoisted out of a loop.
23004     if (Subtarget.hasAVX())
23005       return SDValue();
23006     SDValue ULEOp1 =
23007         incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false);
23008     if (!ULEOp1)
23009       return SDValue();
23010     Op1 = ULEOp1;
23011     break;
23012   }
23013   case ISD::SETUGT: {
23014     // If the comparison is against a constant, we can turn this into a setuge.
23015     // This is beneficial because materializing a constant 0 for the PCMPEQ is
23016     // probably cheaper than XOR+PCMPGT using 2 different vector constants:
23017     // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
23018     SDValue UGEOp1 =
23019         incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false);
23020     if (!UGEOp1)
23021       return SDValue();
23022     Op1 = Op0;
23023     Op0 = UGEOp1;
23024     break;
23025   }
23026   // Psubus is better than flip-sign because it requires no inversion.
23027   case ISD::SETUGE:
23028     std::swap(Op0, Op1);
23029     break;
23030   case ISD::SETULE:
23031     break;
23032   }
23033 
23034   SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
23035   return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
23036                      DAG.getConstant(0, dl, VT));
23037 }
23038 
23039 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
23040                            SelectionDAG &DAG) {
23041   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23042                   Op.getOpcode() == ISD::STRICT_FSETCCS;
23043   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23044   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23045   SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
23046   MVT VT = Op->getSimpleValueType(0);
23047   ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
23048   bool isFP = Op1.getSimpleValueType().isFloatingPoint();
23049   SDLoc dl(Op);
23050 
23051   if (isFP) {
23052     MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
23053     assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
23054     if (isSoftF16(EltVT, Subtarget))
23055       return SDValue();
23056 
23057     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23058     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23059 
23060     // If we have a strict compare with a vXi1 result and the input is 128/256
23061     // bits we can't use a masked compare unless we have VLX. If we use a wider
23062     // compare like we do for non-strict, we might trigger spurious exceptions
23063     // from the upper elements. Instead emit an AVX compare and convert to mask.
23064     unsigned Opc;
23065     if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
23066         (!IsStrict || Subtarget.hasVLX() ||
23067          Op0.getSimpleValueType().is512BitVector())) {
23068 #ifndef NDEBUG
23069       unsigned Num = VT.getVectorNumElements();
23070       assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
23071 #endif
23072       Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
23073     } else {
23074       Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
23075       // The SSE/AVX packed FP comparison nodes are defined with a
23076       // floating-point vector result that matches the operand type. This allows
23077       // them to work with an SSE1 target (integer vector types are not legal).
23078       VT = Op0.getSimpleValueType();
23079     }
23080 
23081     SDValue Cmp;
23082     bool IsAlwaysSignaling;
23083     unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
23084     if (!Subtarget.hasAVX()) {
23085       // TODO: We could use the following steps to handle a quiet compare with
23086       // signaling encodings.
23087       // 1. Get ordered masks from a quiet ISD::SETO
23088       // 2. Use the masks to mask potential unordered elements in operands A and B
23089       // 3. Get the compare results of the masked A and B
23090       // 4. Calculate the final result using the mask and the result from 3
23091       // But currently, we just fall back to scalar operations.
23092       if (IsStrict && IsAlwaysSignaling && !IsSignaling)
23093         return SDValue();
23094 
23095       // Insert an extra signaling instruction to raise exception.
23096       if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
23097         SDValue SignalCmp = DAG.getNode(
23098             Opc, dl, {VT, MVT::Other},
23099             {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
23100         // FIXME: It seems we need to update the flags of all new strict nodes.
23101         // Otherwise, mayRaiseFPException in MI will return false because
23102         // NoFPExcept defaults to false. However, other patches don't seem to
23103         // do this.
23104         SignalCmp->setFlags(Op->getFlags());
23105         Chain = SignalCmp.getValue(1);
23106       }
23107 
23108       // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
23109       // emit two comparisons and a logic op to tie them together.
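            // Illustrative sketch for SETUEQ on v4f32: emit CMPP with imm 3 (UNORD)
            // and CMPP with imm 0 (EQ), then FOR the two results together
            // (cmpunordps + cmpeqps + orps); SETONE instead uses ORD (7) and
            // NEQ (4) combined with FAND.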
23110       if (!cheapX86FSETCC_SSE(Cond)) {
23111         // LLVM predicate is SETUEQ or SETONE.
23112         unsigned CC0, CC1;
23113         unsigned CombineOpc;
23114         if (Cond == ISD::SETUEQ) {
23115           CC0 = 3; // UNORD
23116           CC1 = 0; // EQ
23117           CombineOpc = X86ISD::FOR;
23118         } else {
23119           assert(Cond == ISD::SETONE);
23120           CC0 = 7; // ORD
23121           CC1 = 4; // NEQ
23122           CombineOpc = X86ISD::FAND;
23123         }
23124 
23125         SDValue Cmp0, Cmp1;
23126         if (IsStrict) {
23127           Cmp0 = DAG.getNode(
23128               Opc, dl, {VT, MVT::Other},
23129               {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
23130           Cmp1 = DAG.getNode(
23131               Opc, dl, {VT, MVT::Other},
23132               {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
23133           Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
23134                               Cmp1.getValue(1));
23135         } else {
23136           Cmp0 = DAG.getNode(
23137               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
23138           Cmp1 = DAG.getNode(
23139               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
23140         }
23141         Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
23142       } else {
23143         if (IsStrict) {
23144           Cmp = DAG.getNode(
23145               Opc, dl, {VT, MVT::Other},
23146               {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23147           Chain = Cmp.getValue(1);
23148         } else
23149           Cmp = DAG.getNode(
23150               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23151       }
23152     } else {
23153       // Handle all other FP comparisons here.
23154       if (IsStrict) {
23155         // Set bit 4 of the AVX CC when the requested signaling differs from the CC's default.
23156         SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
23157         Cmp = DAG.getNode(
23158             Opc, dl, {VT, MVT::Other},
23159             {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23160         Chain = Cmp.getValue(1);
23161       } else
23162         Cmp = DAG.getNode(
23163             Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23164     }
23165 
23166     if (VT.getFixedSizeInBits() >
23167         Op.getSimpleValueType().getFixedSizeInBits()) {
23168       // We emitted a compare with an XMM/YMM result. Finish converting to a
23169       // mask register using a vptestm.
23170       EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
23171       Cmp = DAG.getBitcast(CastVT, Cmp);
23172       Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
23173                          DAG.getConstant(0, dl, CastVT), ISD::SETNE);
23174     } else {
23175       // If this is SSE/AVX CMPP, bitcast the result back to integer to match
23176       // the result type of SETCC. The bitcast is expected to be optimized
23177       // away during combining/isel.
23178       Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
23179     }
23180 
23181     if (IsStrict)
23182       return DAG.getMergeValues({Cmp, Chain}, dl);
23183 
23184     return Cmp;
23185   }
23186 
23187   assert(!IsStrict && "Strict SETCC only handles FP operands.");
23188 
23189   MVT VTOp0 = Op0.getSimpleValueType();
23190   (void)VTOp0;
23191   assert(VTOp0 == Op1.getSimpleValueType() &&
23192          "Expected operands with same type!");
23193   assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
23194          "Invalid number of packed elements for source and destination!");
23195 
23196   // The non-AVX512 code below works under the assumption that source and
23197   // destination types are the same.
23198   assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
23199          "Value types for source and destination must be the same!");
23200 
23201   // The result is boolean, but operands are int/float
23202   if (VT.getVectorElementType() == MVT::i1) {
23203     // On AVX-512, setcc returns a mask with i1 elements,
23204     // but there is no compare instruction for i8 and i16 elements on KNL.
23205     assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
23206            "Unexpected operand type");
23207     return LowerIntVSETCC_AVX512(Op, DAG);
23208   }
23209 
23210   // Lower using XOP integer comparisons.
23211   if (VT.is128BitVector() && Subtarget.hasXOP()) {
23212     // Translate compare code to XOP PCOM compare mode.
23213     unsigned CmpMode = 0;
23214     switch (Cond) {
23215     default: llvm_unreachable("Unexpected SETCC condition");
23216     case ISD::SETULT:
23217     case ISD::SETLT: CmpMode = 0x00; break;
23218     case ISD::SETULE:
23219     case ISD::SETLE: CmpMode = 0x01; break;
23220     case ISD::SETUGT:
23221     case ISD::SETGT: CmpMode = 0x02; break;
23222     case ISD::SETUGE:
23223     case ISD::SETGE: CmpMode = 0x03; break;
23224     case ISD::SETEQ: CmpMode = 0x04; break;
23225     case ISD::SETNE: CmpMode = 0x05; break;
23226     }
23227 
23228     // Are we comparing unsigned or signed integers?
23229     unsigned Opc =
23230         ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
23231 
23232     return DAG.getNode(Opc, dl, VT, Op0, Op1,
23233                        DAG.getTargetConstant(CmpMode, dl, MVT::i8));
23234   }
23235 
23236   // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
23237   // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
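        // Illustrative example (v4i32, Y = splat of 8): (setne (and X, 8), 0) is
        // rewritten as (seteq (and X, 8), 8), avoiding the NOT that lowering the
        // SETNE through PCMPEQD would otherwise require.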
23238   if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
23239     SDValue BC0 = peekThroughBitcasts(Op0);
23240     if (BC0.getOpcode() == ISD::AND) {
23241       APInt UndefElts;
23242       SmallVector<APInt, 64> EltBits;
23243       if (getTargetConstantBitsFromNode(BC0.getOperand(1),
23244                                         VT.getScalarSizeInBits(), UndefElts,
23245                                         EltBits, false, false)) {
23246         if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
23247           Cond = ISD::SETEQ;
23248           Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
23249         }
23250       }
23251     }
23252   }
23253 
23254   // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
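        // Illustrative example (v4i32, C = 8, i.e. bit 3): (seteq (and X, 8), 8)
        // becomes (sra (shl X, 28), 31) - the left shift moves bit 3 into the
        // sign position and the arithmetic right shift broadcasts it over the lane.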
23255   if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
23256       Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
23257     ConstantSDNode *C1 = isConstOrConstSplat(Op1);
23258     if (C1 && C1->getAPIntValue().isPowerOf2()) {
23259       unsigned BitWidth = VT.getScalarSizeInBits();
23260       unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
23261 
23262       SDValue Result = Op0.getOperand(0);
23263       Result = DAG.getNode(ISD::SHL, dl, VT, Result,
23264                            DAG.getConstant(ShiftAmt, dl, VT));
23265       Result = DAG.getNode(ISD::SRA, dl, VT, Result,
23266                            DAG.getConstant(BitWidth - 1, dl, VT));
23267       return Result;
23268     }
23269   }
23270 
23271   // Break 256-bit integer vector compare into smaller ones.
23272   if (VT.is256BitVector() && !Subtarget.hasInt256())
23273     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23274 
23275   // Break 512-bit integer vector compare into smaller ones.
23276   // TODO: Try harder to use VPCMPx + VPMOV2x?
23277   if (VT.is512BitVector())
23278     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23279 
23280   // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
23281   // not-of-PCMPEQ:
23282   // X != INT_MIN --> X >s INT_MIN
23283   // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
23284   // +X != 0 --> +X >s 0
23285   APInt ConstValue;
23286   if (Cond == ISD::SETNE &&
23287       ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
23288     if (ConstValue.isMinSignedValue())
23289       Cond = ISD::SETGT;
23290     else if (ConstValue.isMaxSignedValue())
23291       Cond = ISD::SETLT;
23292     else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
23293       Cond = ISD::SETGT;
23294   }
23295 
23296   // If both operands are known non-negative, then an unsigned compare is the
23297   // same as a signed compare and there's no need to flip signbits.
23298   // TODO: We could check for more general simplifications here since we're
23299   // computing known bits.
23300   bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
23301                    !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
23302 
23303   // Special case: Use min/max operations for unsigned compares.
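        // Illustrative identities (v16i8): (setule X, Y) == (pcmpeqb X, (pminub X, Y))
        // and (setuge X, Y) == (pcmpeqb X, (pmaxub X, Y)); SETULT/SETUGT are handled
        // below as the inverted forms.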
23304   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23305   if (ISD::isUnsignedIntSetCC(Cond) &&
23306       (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
23307       TLI.isOperationLegal(ISD::UMIN, VT)) {
23308     // If we have a constant operand, increment/decrement it and change the
23309     // condition to avoid an invert.
23310     if (Cond == ISD::SETUGT) {
23311       // X > C --> X >= (C+1) --> X == umax(X, C+1)
23312       if (SDValue UGTOp1 =
23313               incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false)) {
23314         Op1 = UGTOp1;
23315         Cond = ISD::SETUGE;
23316       }
23317     }
23318     if (Cond == ISD::SETULT) {
23319       // X < C --> X <= (C-1) --> X == umin(X, C-1)
23320       if (SDValue ULTOp1 =
23321               incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false)) {
23322         Op1 = ULTOp1;
23323         Cond = ISD::SETULE;
23324       }
23325     }
23326     bool Invert = false;
23327     unsigned Opc;
23328     switch (Cond) {
23329     default: llvm_unreachable("Unexpected condition code");
23330     case ISD::SETUGT: Invert = true; [[fallthrough]];
23331     case ISD::SETULE: Opc = ISD::UMIN; break;
23332     case ISD::SETULT: Invert = true; [[fallthrough]];
23333     case ISD::SETUGE: Opc = ISD::UMAX; break;
23334     }
23335 
23336     SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23337     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
23338 
23339     // If the logical-not of the result is required, perform that now.
23340     if (Invert)
23341       Result = DAG.getNOT(dl, Result, VT);
23342 
23343     return Result;
23344   }
23345 
23346   // Try to use SUBUS and PCMPEQ.
23347   if (FlipSigns)
23348     if (SDValue V =
23349             LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
23350       return V;
23351 
23352   // We are handling one of the integer comparisons here. Since SSE only has
23353   // GT and EQ comparisons for integers, swapping operands and multiple
23354   // operations may be required for some comparisons.
23355   unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
23356                                                             : X86ISD::PCMPGT;
23357   bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
23358               Cond == ISD::SETGE || Cond == ISD::SETUGE;
23359   bool Invert = Cond == ISD::SETNE ||
23360                 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
23361 
23362   if (Swap)
23363     std::swap(Op0, Op1);
23364 
23365   // Check that the operation in question is available (most are plain SSE2,
23366   // but PCMPGTQ and PCMPEQQ have different requirements).
23367   if (VT == MVT::v2i64) {
23368     if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
23369       assert(Subtarget.hasSSE2() && "Don't know how to lower!");
23370 
23371       // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
23372       // the odd elements over the even elements.
23373       if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
23374         Op0 = DAG.getConstant(0, dl, MVT::v4i32);
23375         Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23376 
23377         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23378         static const int MaskHi[] = { 1, 1, 3, 3 };
23379         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23380 
23381         return DAG.getBitcast(VT, Result);
23382       }
23383 
23384       if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
23385         Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23386         Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
23387 
23388         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23389         static const int MaskHi[] = { 1, 1, 3, 3 };
23390         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23391 
23392         return DAG.getBitcast(VT, Result);
23393       }
23394 
23395       // Since SSE has no unsigned integer comparisons, we need to flip the sign
23396       // bits of the inputs before performing those operations. The lower
23397       // compare is always unsigned.
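            // Illustrative: for a signed v2i64 compare the constant below is
            // 0x0000000080000000, biasing only the low dwords; for an unsigned
            // compare it is 0x8000000080000000 so the high-dword PCMPGT is
            // effectively unsigned as well.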
23398       SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
23399                                              : 0x0000000080000000ULL,
23400                                    dl, MVT::v2i64);
23401 
23402       Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
23403       Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
23404 
23405       // Cast everything to the right type.
23406       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23407       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23408 
23409       // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
23410       SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23411       SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
23412 
23413       // Create masks for only the low/high parts of the 64-bit integers.
23414       static const int MaskHi[] = { 1, 1, 3, 3 };
23415       static const int MaskLo[] = { 0, 0, 2, 2 };
23416       SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
23417       SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
23418       SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23419 
23420       SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
23421       Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
23422 
23423       if (Invert)
23424         Result = DAG.getNOT(dl, Result, MVT::v4i32);
23425 
23426       return DAG.getBitcast(VT, Result);
23427     }
23428 
23429     if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
23430       // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
23431       // pcmpeqd + pshufd + pand.
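            // Illustrative sequence: PCMPEQD gives per-dword equality, PSHUFD with
            // mask {1,0,3,2} swaps the two dwords within each qword, and the PAND
            // leaves a qword all-ones only if both of its dword halves matched.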
23432       assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
23433 
23434       // First cast everything to the right type.
23435       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23436       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23437 
23438       // Do the compare.
23439       SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
23440 
23441       // Make sure the lower and upper halves are both all-ones.
23442       static const int Mask[] = { 1, 0, 3, 2 };
23443       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
23444       Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
23445 
23446       if (Invert)
23447         Result = DAG.getNOT(dl, Result, MVT::v4i32);
23448 
23449       return DAG.getBitcast(VT, Result);
23450     }
23451   }
23452 
23453   // Since SSE has no unsigned integer comparisons, we need to flip the sign
23454   // bits of the inputs before performing those operations.
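        // Illustrative example (v4i32): (setugt X, Y) becomes
        // (pcmpgtd (xor X, splat 0x80000000), (xor Y, splat 0x80000000)), since
        // biasing by the sign mask turns the unsigned ordering into the signed one.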
23455   if (FlipSigns) {
23456     MVT EltVT = VT.getVectorElementType();
23457     SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
23458                                  VT);
23459     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
23460     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
23461   }
23462 
23463   SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23464 
23465   // If the logical-not of the result is required, perform that now.
23466   if (Invert)
23467     Result = DAG.getNOT(dl, Result, VT);
23468 
23469   return Result;
23470 }
23471 
23472 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
23473 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
23474                               const SDLoc &dl, SelectionDAG &DAG,
23475                               const X86Subtarget &Subtarget,
23476                               SDValue &X86CC) {
23477   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23478 
23479   // Must be a bitcast from vXi1.
23480   if (Op0.getOpcode() != ISD::BITCAST)
23481     return SDValue();
23482 
23483   Op0 = Op0.getOperand(0);
23484   MVT VT = Op0.getSimpleValueType();
23485   if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
23486       !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
23487       !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
23488     return SDValue();
23489 
23490   X86::CondCode X86Cond;
23491   if (isNullConstant(Op1)) {
23492     X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
23493   } else if (isAllOnesConstant(Op1)) {
23494     // C flag is set for all ones.
23495     X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
23496   } else
23497     return SDValue();
23498 
23499   // If the input is an AND, we can combine its operands into the KTEST.
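        // Illustrative (assuming AVX512DQ): (seteq (bitcast (and A:v16i1, B)), 0)
        // can be emitted as KTESTW A, B followed by SETE, instead of a separate
        // KANDW + KORTESTW pair.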
23500   bool KTestable = false;
23501   if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
23502     KTestable = true;
23503   if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
23504     KTestable = true;
23505   if (!isNullConstant(Op1))
23506     KTestable = false;
23507   if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
23508     SDValue LHS = Op0.getOperand(0);
23509     SDValue RHS = Op0.getOperand(1);
23510     X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23511     return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
23512   }
23513 
23514   // If the input is an OR, we can combine its operands into the KORTEST.
23515   SDValue LHS = Op0;
23516   SDValue RHS = Op0;
23517   if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
23518     LHS = Op0.getOperand(0);
23519     RHS = Op0.getOperand(1);
23520   }
23521 
23522   X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23523   return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
23524 }
23525 
23526 /// Emit flags for the given setcc condition and operands. Also returns the
23527 /// corresponding X86 condition code constant in X86CC.
23528 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
23529                                              ISD::CondCode CC, const SDLoc &dl,
23530                                              SelectionDAG &DAG,
23531                                              SDValue &X86CC) const {
23532   // Equality Combines.
23533   if (CC == ISD::SETEQ || CC == ISD::SETNE) {
23534     X86::CondCode X86CondCode;
23535 
23536     // Optimize to BT if possible.
23537     // Lower (X & (1 << N)) == 0 to BT(X, N).
23538     // Lower ((X >>u N) & 1) != 0 to BT(X, N).
23539     // Lower ((X >>s N) & 1) != 0 to BT(X, N).
23540     if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1)) {
23541       if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
23542         X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23543         return BT;
23544       }
23545     }
23546 
23547     // Try to use PTEST/PMOVMSKB for a tree of AND/ORs equality-compared with -1/0.
23548     if (SDValue CmpZ = MatchVectorAllEqualTest(Op0, Op1, CC, dl, Subtarget, DAG,
23549                                                X86CondCode)) {
23550       X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23551       return CmpZ;
23552     }
23553 
23554     // Try to lower using KORTEST or KTEST.
23555     if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
23556       return Test;
23557 
23558     // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms
23559     // of these.
23560     if (isOneConstant(Op1) || isNullConstant(Op1)) {
23561       // If the input is a setcc, then reuse the input setcc or use a new one
23562       // with the inverted condition.
23563       if (Op0.getOpcode() == X86ISD::SETCC) {
23564         bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
23565 
23566         X86CC = Op0.getOperand(0);
23567         if (Invert) {
23568           X86CondCode = (X86::CondCode)Op0.getConstantOperandVal(0);
23569           X86CondCode = X86::GetOppositeBranchCondition(X86CondCode);
23570           X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23571         }
23572 
23573         return Op0.getOperand(1);
23574       }
23575     }
23576 
23577     // Try to use the carry flag from the add in place of a separate CMP for:
23578     // (seteq (add X, -1), -1). Similar for setne.
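          // Illustrative: X + (-1) produces a carry exactly when X != 0, so the
          // seteq form (X == 0) maps to COND_AE (carry clear) and the setne form
          // maps to COND_B (carry set).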
23579     if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
23580         Op0.getOperand(1) == Op1) {
23581       if (isProfitableToUseFlagOp(Op0)) {
23582         SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
23583 
23584         SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
23585                                   Op0.getOperand(1));
23586         DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
23587         X86CondCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
23588         X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23589         return SDValue(New.getNode(), 1);
23590       }
23591     }
23592   }
23593 
23594   X86::CondCode CondCode =
23595       TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
23596   assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
23597 
23598   SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
23599   X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23600   return EFLAGS;
23601 }
23602 
23603 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
23604 
23605   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23606                   Op.getOpcode() == ISD::STRICT_FSETCCS;
23607   MVT VT = Op->getSimpleValueType(0);
23608 
23609   if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
23610 
23611   assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
23612   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23613   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23614   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23615   SDLoc dl(Op);
23616   ISD::CondCode CC =
23617       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
23618 
23619   if (isSoftF16(Op0.getValueType(), Subtarget))
23620     return SDValue();
23621 
23622   // Handle f128 first, since one possible outcome is a normal integer
23623   // comparison which gets handled by emitFlagsForSetcc.
23624   if (Op0.getValueType() == MVT::f128) {
23625     softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
23626                         Op.getOpcode() == ISD::STRICT_FSETCCS);
23627 
23628     // If softenSetCCOperands returned a scalar, use it.
23629     if (!Op1.getNode()) {
23630       assert(Op0.getValueType() == Op.getValueType() &&
23631              "Unexpected setcc expansion!");
23632       if (IsStrict)
23633         return DAG.getMergeValues({Op0, Chain}, dl);
23634       return Op0;
23635     }
23636   }
23637 
23638   if (Op0.getSimpleValueType().isInteger()) {
23639     // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with a constant, which
23640     // reduces the number of EFLAGS bit reads (the GE conditions don't read ZF);
23641     // this may translate to fewer uops depending on the uarch implementation. The
23642     // equivalent for SLE/ULE -> SLT/ULT isn't likely to happen as we already
23643     // canonicalize to that CondCode.
23644     // NOTE: Only do this if incrementing the constant doesn't increase the bit
23645     // encoding size - so it must either already be an i8 or i32 immediate, or it
23646     // shrinks down to that. We don't do this for any i64s to avoid additional
23647     // constant materializations.
23648     // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
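          // Illustrative example (i32): (setgt X, 9) becomes (setge X, 10), i.e.
          // "cmp $10" + setge/jge instead of "cmp $9" + setg/jg; GE and AE depend
          // only on SF/OF (respectively CF), while G and A also read ZF.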
23649     if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
23650       const APInt &Op1Val = Op1C->getAPIntValue();
23651       if (!Op1Val.isZero()) {
23652         // Ensure the constant+1 doesn't overflow.
23653         if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
23654             (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
23655           APInt Op1ValPlusOne = Op1Val + 1;
23656           if (Op1ValPlusOne.isSignedIntN(32) &&
23657               (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
23658             Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
23659             CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
23660                                             : ISD::CondCode::SETUGE;
23661           }
23662         }
23663       }
23664     }
23665 
23666     SDValue X86CC;
23667     SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
23668     SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23669     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23670   }
23671 
23672   // Handle floating point.
23673   X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
23674   if (CondCode == X86::COND_INVALID)
23675     return SDValue();
23676 
23677   SDValue EFLAGS;
23678   if (IsStrict) {
23679     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23680     EFLAGS =
23681         DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
23682                     dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
23683     Chain = EFLAGS.getValue(1);
23684   } else {
23685     EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
23686   }
23687 
23688   SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23689   SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23690   return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23691 }
23692 
23693 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
23694   SDValue LHS = Op.getOperand(0);
23695   SDValue RHS = Op.getOperand(1);
23696   SDValue Carry = Op.getOperand(2);
23697   SDValue Cond = Op.getOperand(3);
23698   SDLoc DL(Op);
23699 
23700   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
23701   X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
23702 
23703   // Recreate the carry if needed.
23704   EVT CarryVT = Carry.getValueType();
23705   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
23706                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
23707 
23708   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
23709   SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
23710   return getSETCC(CC, Cmp.getValue(1), DL, DAG);
23711 }
23712 
23713 // This function returns three things: the arithmetic computation itself
23714 // (Value), an EFLAGS result (Overflow), and a condition code (Cond).  The
23715 // flag and the condition code define the case in which the arithmetic
23716 // computation overflows.
23717 static std::pair<SDValue, SDValue>
23718 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
23719   assert(Op.getResNo() == 0 && "Unexpected result number!");
23720   SDValue Value, Overflow;
23721   SDValue LHS = Op.getOperand(0);
23722   SDValue RHS = Op.getOperand(1);
23723   unsigned BaseOp = 0;
23724   SDLoc DL(Op);
23725   switch (Op.getOpcode()) {
23726   default: llvm_unreachable("Unknown ovf instruction!");
23727   case ISD::SADDO:
23728     BaseOp = X86ISD::ADD;
23729     Cond = X86::COND_O;
23730     break;
23731   case ISD::UADDO:
23732     BaseOp = X86ISD::ADD;
23733     Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
23734     break;
23735   case ISD::SSUBO:
23736     BaseOp = X86ISD::SUB;
23737     Cond = X86::COND_O;
23738     break;
23739   case ISD::USUBO:
23740     BaseOp = X86ISD::SUB;
23741     Cond = X86::COND_B;
23742     break;
23743   case ISD::SMULO:
23744     BaseOp = X86ISD::SMUL;
23745     Cond = X86::COND_O;
23746     break;
23747   case ISD::UMULO:
23748     BaseOp = X86ISD::UMUL;
23749     Cond = X86::COND_O;
23750     break;
23751   }
23752 
23753   if (BaseOp) {
23754     // Also sets EFLAGS.
23755     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23756     Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
23757     Overflow = Value.getValue(1);
23758   }
23759 
23760   return std::make_pair(Value, Overflow);
23761 }
23762 
23763 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
23764   // Lower the "add/sub/mul with overflow" instruction into a regular operation plus
23765   // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
23766   // looks for this combo and may remove the "setcc" instruction if the "setcc"
23767   // has only one use.
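        // Illustrative: (saddo i32 A, B) becomes (X86ISD::ADD A, B), whose second
        // result is EFLAGS, plus a SETCC of COND_O on those flags; UADDO generally
        // checks the carry flag (COND_B) instead.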
23768   SDLoc DL(Op);
23769   X86::CondCode Cond;
23770   SDValue Value, Overflow;
23771   std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
23772 
23773   SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
23774   assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
23775   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
23776 }
23777 
23778 /// Return true if the opcode is an X86 logical comparison.
23779 static bool isX86LogicalCmp(SDValue Op) {
23780   unsigned Opc = Op.getOpcode();
23781   if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
23782       Opc == X86ISD::FCMP)
23783     return true;
23784   if (Op.getResNo() == 1 &&
23785       (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
23786        Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
23787        Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
23788     return true;
23789 
23790   return false;
23791 }
23792 
23793 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
23794   if (V.getOpcode() != ISD::TRUNCATE)
23795     return false;
23796 
23797   SDValue VOp0 = V.getOperand(0);
23798   unsigned InBits = VOp0.getValueSizeInBits();
23799   unsigned Bits = V.getValueSizeInBits();
23800   return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
23801 }
23802 
23803 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
23804   bool AddTest = true;
23805   SDValue Cond  = Op.getOperand(0);
23806   SDValue Op1 = Op.getOperand(1);
23807   SDValue Op2 = Op.getOperand(2);
23808   SDLoc DL(Op);
23809   MVT VT = Op1.getSimpleValueType();
23810   SDValue CC;
23811 
23812   if (isSoftF16(VT, Subtarget)) {
23813     MVT NVT = VT.changeTypeToInteger();
23814     return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
23815                                           DAG.getBitcast(NVT, Op1),
23816                                           DAG.getBitcast(NVT, Op2)));
23817   }
23818 
23819   // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
23820   // are available or VBLENDV if AVX is available.
23821   // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
23822   if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
23823       VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
23824     SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
23825     bool IsAlwaysSignaling;
23826     unsigned SSECC =
23827         translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
23828                            CondOp0, CondOp1, IsAlwaysSignaling);
23829 
23830     if (Subtarget.hasAVX512()) {
23831       SDValue Cmp =
23832           DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
23833                       DAG.getTargetConstant(SSECC, DL, MVT::i8));
23834       assert(!VT.isVector() && "Not a scalar type?");
23835       return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23836     }
23837 
23838     if (SSECC < 8 || Subtarget.hasAVX()) {
23839       SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
23840                                 DAG.getTargetConstant(SSECC, DL, MVT::i8));
23841 
23842       // If we have AVX, we can use a variable vector select (VBLENDV) instead
23843       // of 3 logic instructions for size savings and potentially speed.
23844       // Unfortunately, there is no scalar form of VBLENDV.
23845 
23846       // If either operand is a +0.0 constant, don't try this. We can expect to
23847       // optimize away at least one of the logic instructions later in that
23848       // case, so that sequence would be faster than a variable blend.
23849 
23850       // BLENDV was introduced with SSE 4.1, but the 2-register form implicitly
23851       // uses XMM0 as the selection register. That may need just as many
23852       // instructions as the AND/ANDN/OR sequence due to register moves, so
23853       // don't bother.
23854       if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
23855           !isNullFPConstant(Op2)) {
23856         // Convert to vectors, do a VSELECT, and convert back to scalar.
23857         // All of the conversions should be optimized away.
23858         MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
23859         SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
23860         SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
23861         SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
23862 
23863         MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
23864         VCmp = DAG.getBitcast(VCmpVT, VCmp);
23865 
23866         SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
23867 
23868         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
23869                            VSel, DAG.getIntPtrConstant(0, DL));
23870       }
23871       SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
23872       SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
23873       return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
23874     }
23875   }
23876 
23877   // AVX512 fallback is to lower selects of scalar floats to masked moves.
23878   if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
23879     SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
23880     return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23881   }
23882 
23883   if (Cond.getOpcode() == ISD::SETCC &&
23884       !isSoftF16(Cond.getOperand(0).getSimpleValueType(), Subtarget)) {
23885     if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
23886       Cond = NewCond;
23887       // If the condition was updated, it's possible that the operands of the
23888       // select were also updated (for example, EmitTest has a RAUW). Refresh
23889       // the local references to the select operands in case they got stale.
23890       Op1 = Op.getOperand(1);
23891       Op2 = Op.getOperand(2);
23892     }
23893   }
23894 
23895   // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
23896   // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
23897   // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
23898   // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
23899   // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
23900   // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
23901   // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
23902   // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
23903   if (Cond.getOpcode() == X86ISD::SETCC &&
23904       Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
23905       isNullConstant(Cond.getOperand(1).getOperand(1))) {
23906     SDValue Cmp = Cond.getOperand(1);
23907     SDValue CmpOp0 = Cmp.getOperand(0);
23908     unsigned CondCode = Cond.getConstantOperandVal(0);
23909 
23910     // Special handling for the __builtin_ffs(X) - 1 pattern, which looks like
23911     // (select (seteq X, 0), -1, (cttz_zero_undef X)). Skip the special handling
23912     // here to keep the CMP with 0. The CMP should later be removed by
23913     // optimizeCompareInst using the flags from the BSR/TZCNT emitted for the
23914     // cttz_zero_undef.
23915     auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
23916       return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
23917               Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
23918     };
23919     if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
23920         ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
23921          (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
23922       // Keep Cmp.
23923     } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23924         (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
23925       SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
23926       SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
23927 
23928       // 'X - 1' sets the carry flag if X == 0.
23929       // '0 - X' sets the carry flag if X != 0.
23930       // Convert the carry flag to a -1/0 mask with sbb:
23931       // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
23932       // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
23933       // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
23934       // select (X == 0), -1, Y --> X - 1; or (sbb), Y
23935       SDValue Sub;
23936       if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
23937         SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
23938         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
23939       } else {
23940         SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
23941         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
23942       }
23943       SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
23944                                 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
23945                                 Sub.getValue(1));
23946       return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
23947     } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
23948                CmpOp0.getOpcode() == ISD::AND &&
23949                isOneConstant(CmpOp0.getOperand(1))) {
23950       SDValue Src1, Src2;
23951       // Returns true if Op2 is an XOR or OR operator and one of its operands
23952       // is equal to Op1, i.e. the pattern is
23953       // (a, a op b) || (b, a op b)
23954       auto isOrXorPattern = [&]() {
23955         if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
23956             (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
23957           Src1 =
23958               Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
23959           Src2 = Op1;
23960           return true;
23961         }
23962         return false;
23963       };
23964 
23965       if (isOrXorPattern()) {
23966         SDValue Neg;
23967         unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
23968         // We need a mask of all zeros or all ones with the same size as the
23969         // other operands.
23970         if (CmpSz > VT.getSizeInBits())
23971           Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
23972         else if (CmpSz < VT.getSizeInBits())
23973           Neg = DAG.getNode(ISD::AND, DL, VT,
23974               DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
23975               DAG.getConstant(1, DL, VT));
23976         else
23977           Neg = CmpOp0;
23978         SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
23979                                    Neg); // -(and (x, 0x1))
23980         SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
23981         return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
23982       }
23983     } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
23984                Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
23985                ((CondCode == X86::COND_S) ||                    // smin(x, 0)
23986                 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
23987       // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
23988       //
23989       // If the comparison is testing for a positive value, we have to invert
23990       // the sign bit mask, so only do that transform if the target has a
23991       // bitwise 'and not' instruction (the invert is free).
23992       // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
23993       unsigned ShCt = VT.getSizeInBits() - 1;
23994       SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
23995       SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
23996       if (CondCode == X86::COND_G)
23997         Shift = DAG.getNOT(DL, Shift, VT);
23998       return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
23999     }
24000   }
24001 
24002   // Look past (and (setcc_carry (cmp ...)), 1).
24003   if (Cond.getOpcode() == ISD::AND &&
24004       Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
24005       isOneConstant(Cond.getOperand(1)))
24006     Cond = Cond.getOperand(0);
24007 
24008   // If the condition flag is set by an X86ISD::CMP, then use it as the condition
24009   // setting operand in place of the X86ISD::SETCC.
24010   unsigned CondOpcode = Cond.getOpcode();
24011   if (CondOpcode == X86ISD::SETCC ||
24012       CondOpcode == X86ISD::SETCC_CARRY) {
24013     CC = Cond.getOperand(0);
24014 
24015     SDValue Cmp = Cond.getOperand(1);
24016     bool IllegalFPCMov = false;
24017     if (VT.isFloatingPoint() && !VT.isVector() &&
24018         !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV())  // FPStack?
24019       IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
24020 
24021     if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
24022         Cmp.getOpcode() == X86ISD::BT) { // FIXME
24023       Cond = Cmp;
24024       AddTest = false;
24025     }
24026   } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
24027              CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
24028              CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
24029     SDValue Value;
24030     X86::CondCode X86Cond;
24031     std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24032 
24033     CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
24034     AddTest = false;
24035   }
24036 
24037   if (AddTest) {
24038     // Look past the truncate if the high bits are known zero.
24039     if (isTruncWithZeroHighBitsInput(Cond, DAG))
24040       Cond = Cond.getOperand(0);
24041 
24042     // We know the result of AND is compared against zero. Try to match
24043     // it to BT.
24044     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
24045       X86::CondCode X86CondCode;
24046       if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
24047         CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
24048         Cond = BT;
24049         AddTest = false;
24050       }
24051     }
24052   }
24053 
24054   if (AddTest) {
24055     CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
24056     Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
24057   }
24058 
24059   // a <  b ? -1 :  0 -> RES = ~setcc_carry
24060   // a <  b ?  0 : -1 -> RES = setcc_carry
24061   // a >= b ? -1 :  0 -> RES = setcc_carry
24062   // a >= b ?  0 : -1 -> RES = ~setcc_carry
24063   if (Cond.getOpcode() == X86ISD::SUB) {
24064     unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
24065 
24066     if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
24067         (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
24068         (isNullConstant(Op1) || isNullConstant(Op2))) {
24069       SDValue Res =
24070           DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
24071                       DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
24072       if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
24073         return DAG.getNOT(DL, Res, Res.getValueType());
24074       return Res;
24075     }
24076   }
24077 
24078   // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
24079   // widen the cmov and push the truncate through. This avoids introducing a new
24080   // branch during isel and doesn't add any extensions.
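        // Illustrative: (select Cc, (trunc i32 A to i8), (trunc i32 B to i8)) is
        // emitted as a 32-bit CMOV of A and B followed by a single truncate to i8,
        // reusing the wider values that already exist.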
24081   if (Op.getValueType() == MVT::i8 &&
24082       Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
24083     SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
24084     if (T1.getValueType() == T2.getValueType() &&
24085         // Exclude CopyFromReg to avoid partial register stalls.
24086         T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
24087       SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
24088                                  CC, Cond);
24089       return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24090     }
24091   }
24092 
24093   // Or finally, promote i8 cmovs if we have CMOV,
24094   //                 or i16 cmovs if it won't prevent folding a load.
24095   // FIXME: we should not limit promotion of i8 case to only when the CMOV is
24096   //        legal, but EmitLoweredSelect() cannot deal with these extensions
24097   //        being inserted between two CMOV's. (in i16 case too TBN)
24098   //        https://bugs.llvm.org/show_bug.cgi?id=40974
24099   if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
24100       (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
24101        !X86::mayFoldLoad(Op2, Subtarget))) {
24102     Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
24103     Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
24104     SDValue Ops[] = { Op2, Op1, CC, Cond };
24105     SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
24106     return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24107   }
24108 
24109   // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
24110   // condition is true.
24111   SDValue Ops[] = { Op2, Op1, CC, Cond };
24112   return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops, Op->getFlags());
24113 }
24114 
24115 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
24116                                      const X86Subtarget &Subtarget,
24117                                      SelectionDAG &DAG) {
24118   MVT VT = Op->getSimpleValueType(0);
24119   SDValue In = Op->getOperand(0);
24120   MVT InVT = In.getSimpleValueType();
24121   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
24122   MVT VTElt = VT.getVectorElementType();
24123   SDLoc dl(Op);
24124 
24125   unsigned NumElts = VT.getVectorNumElements();
24126 
24127   // Extend VT if the scalar type is i8/i16 and BWI is not supported.
24128   MVT ExtVT = VT;
24129   if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
24130     // If v16i32 is to be avoided, we'll need to split and concatenate.
24131     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
24132       return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
24133 
24134     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
24135   }
24136 
24137   // Widen to 512-bits if VLX is not supported.
24138   MVT WideVT = ExtVT;
24139   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
24140     NumElts *= 512 / ExtVT.getSizeInBits();
24141     InVT = MVT::getVectorVT(MVT::i1, NumElts);
24142     In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
24143                      In, DAG.getIntPtrConstant(0, dl));
24144     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
24145   }
24146 
24147   SDValue V;
24148   MVT WideEltVT = WideVT.getVectorElementType();
24149   if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
24150       (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
24151     V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
24152   } else {
24153     SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
24154     SDValue Zero = DAG.getConstant(0, dl, WideVT);
24155     V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
24156   }
24157 
24158   // Truncate if we had to extend i16/i8 above.
24159   if (VT != ExtVT) {
24160     WideVT = MVT::getVectorVT(VTElt, NumElts);
24161     V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
24162   }
24163 
24164   // Extract back to 128/256-bit if we widened.
24165   if (WideVT != VT)
24166     V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
24167                     DAG.getIntPtrConstant(0, dl));
24168 
24169   return V;
24170 }
24171 
24172 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24173                                SelectionDAG &DAG) {
24174   SDValue In = Op->getOperand(0);
24175   MVT InVT = In.getSimpleValueType();
24176 
24177   if (InVT.getVectorElementType() == MVT::i1)
24178     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24179 
24180   assert(Subtarget.hasAVX() && "Expected AVX support");
24181   return LowerAVXExtend(Op, DAG, Subtarget);
24182 }
24183 
24184 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
24185 // For sign extend this needs to handle all vector sizes and SSE4.1 and
24186 // non-SSE4.1 targets. For zero extend this should only handle inputs of
24187 // MVT::v64i8 when BWI is not supported, but AVX512 is.
24188 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
24189                                         const X86Subtarget &Subtarget,
24190                                         SelectionDAG &DAG) {
24191   SDValue In = Op->getOperand(0);
24192   MVT VT = Op->getSimpleValueType(0);
24193   MVT InVT = In.getSimpleValueType();
24194 
24195   MVT SVT = VT.getVectorElementType();
24196   MVT InSVT = InVT.getVectorElementType();
24197   assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
24198 
24199   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
24200     return SDValue();
24201   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
24202     return SDValue();
24203   if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
24204       !(VT.is256BitVector() && Subtarget.hasAVX()) &&
24205       !(VT.is512BitVector() && Subtarget.hasAVX512()))
24206     return SDValue();
24207 
24208   SDLoc dl(Op);
24209   unsigned Opc = Op.getOpcode();
24210   unsigned NumElts = VT.getVectorNumElements();
24211 
24212   // For 256-bit vectors, we only need the lower (128-bit) half of the input.
24213   // For 512-bit vectors, we need 128 bits or 256 bits.
24214   if (InVT.getSizeInBits() > 128) {
24215     // Input needs to be at least the same number of elements as output, and
24216     // at least 128-bits.
24217     // at least 128 bits.
24218     In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
24219     InVT = In.getSimpleValueType();
24220   }
24221 
24222   // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
24223   // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
24224   // instructions still need to be handled here for 256/512-bit results.
24225   if (Subtarget.hasInt256()) {
24226     assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
24227 
24228     if (InVT.getVectorNumElements() != NumElts)
24229       return DAG.getNode(Op.getOpcode(), dl, VT, In);
24230 
24231     // FIXME: Apparently we create inreg operations that could be regular
24232     // extends.
24233     unsigned ExtOpc =
24234         Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
24235                                              : ISD::ZERO_EXTEND;
24236     return DAG.getNode(ExtOpc, dl, VT, In);
24237   }
24238 
24239   // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
24240   if (Subtarget.hasAVX()) {
24241     assert(VT.is256BitVector() && "256-bit vector expected");
24242     MVT HalfVT = VT.getHalfNumVectorElementsVT();
24243     int HalfNumElts = HalfVT.getVectorNumElements();
24244 
24245     unsigned NumSrcElts = InVT.getVectorNumElements();
24246     SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
24247     for (int i = 0; i != HalfNumElts; ++i)
24248       HiMask[i] = HalfNumElts + i;
24249 
24250     SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
24251     SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
24252     Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
24253     return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
24254   }
24255 
24256   // We should only get here for sign extend.
24257   assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
24258   assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
24259   unsigned InNumElts = InVT.getVectorNumElements();
24260 
24261   // If the source elements are already all-signbits, we don't need to extend,
24262   // just splat the elements.
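        // Illustrative (v16i8 -> v4i32 inreg, Scale = 4): if every source element
        // is already 0 or -1, the shuffle mask {0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3}
        // followed by a bitcast produces the result with no shifts at all.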
24263   APInt DemandedElts = APInt::getLowBitsSet(InNumElts, NumElts);
24264   if (DAG.ComputeNumSignBits(In, DemandedElts) == InVT.getScalarSizeInBits()) {
24265     unsigned Scale = InNumElts / NumElts;
24266     SmallVector<int, 16> ShuffleMask;
24267     for (unsigned I = 0; I != NumElts; ++I)
24268       ShuffleMask.append(Scale, I);
24269     return DAG.getBitcast(VT,
24270                           DAG.getVectorShuffle(InVT, dl, In, In, ShuffleMask));
24271   }
24272 
24273   // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
24274   SDValue Curr = In;
24275   SDValue SignExt = Curr;
24276 
24277   // As SRAI is only available on i16/i32 types, we expand only up to i32
24278   // and handle i64 separately.
24279   if (InVT != MVT::v4i32) {
24280     MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
24281 
24282     unsigned DestWidth = DestVT.getScalarSizeInBits();
24283     unsigned Scale = DestWidth / InSVT.getSizeInBits();
24284     unsigned DestElts = DestVT.getVectorNumElements();
24285 
24286     // Build a shuffle mask that takes each input element and places it in the
24287     // MSBs of the new element size.
24288     SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
24289     for (unsigned i = 0; i != DestElts; ++i)
24290       Mask[i * Scale + (Scale - 1)] = i;
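          // e.g. for v16i8 -> v8i16: Scale = 2 and the mask is
          // {-1, 0, -1, 1, -1, 2, ..., -1, 7}, so source byte i lands in the
          // high byte of 16-bit element i and the shift right by 8 below
          // completes the sign extension.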
24291 
24292     Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
24293     Curr = DAG.getBitcast(DestVT, Curr);
24294 
24295     unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
24296     SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
24297                           DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
24298   }
24299 
24300   if (VT == MVT::v2i64) {
24301     assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
24302     SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
24303     SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
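          // Sign[i] is all-ones when Curr[i] is negative; the {0, 4, 1, 5}
          // interleave below yields {lo0, sign0, lo1, sign1}, which bitcasts
          // to the two sign-extended i64 elements.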
24304     SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
24305     SignExt = DAG.getBitcast(VT, SignExt);
24306   }
24307 
24308   return SignExt;
24309 }
24310 
24311 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24312                                 SelectionDAG &DAG) {
24313   MVT VT = Op->getSimpleValueType(0);
24314   SDValue In = Op->getOperand(0);
24315   MVT InVT = In.getSimpleValueType();
24316   SDLoc dl(Op);
24317 
24318   if (InVT.getVectorElementType() == MVT::i1)
24319     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24320 
24321   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
24322   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
24323          "Expected same number of elements");
24324   assert((VT.getVectorElementType() == MVT::i16 ||
24325           VT.getVectorElementType() == MVT::i32 ||
24326           VT.getVectorElementType() == MVT::i64) &&
24327          "Unexpected element type");
24328   assert((InVT.getVectorElementType() == MVT::i8 ||
24329           InVT.getVectorElementType() == MVT::i16 ||
24330           InVT.getVectorElementType() == MVT::i32) &&
24331          "Unexpected element type");
24332 
24333   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
24334     assert(InVT == MVT::v32i8 && "Unexpected VT!");
24335     return splitVectorIntUnary(Op, DAG);
24336   }
24337 
24338   if (Subtarget.hasInt256())
24339     return Op;
24340 
24341   // Optimize vectors in AVX mode:
24342   // Sign extend v8i16 to v8i32 and
24343   //             v4i32 to v4i64.
24344   //
24345   // Divide the input vector into two parts;
24346   // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
24347   // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
24348   // then concat the vectors back to the original VT.
24349   MVT HalfVT = VT.getHalfNumVectorElementsVT();
24350   SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
24351 
24352   unsigned NumElems = InVT.getVectorNumElements();
24353   SmallVector<int,8> ShufMask(NumElems, -1);
24354   for (unsigned i = 0; i != NumElems/2; ++i)
24355     ShufMask[i] = i + NumElems/2;
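        // e.g. for v8i16 the high shuffle mask is {4, 5, 6, 7, -1, -1, -1, -1}.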
24356 
24357   SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
24358   OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
24359 
24360   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
24361 }
24362 
24363 /// Change a vector store into a pair of half-size vector stores.
24364 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
24365   SDValue StoredVal = Store->getValue();
24366   assert((StoredVal.getValueType().is256BitVector() ||
24367           StoredVal.getValueType().is512BitVector()) &&
24368          "Expecting 256/512-bit op");
24369 
24370   // Splitting volatile memory ops is not allowed unless the operation was not
24371   // legal to begin with. Assume the input store is legal (this transform is
24372   // only used for targets with AVX). Note: It is possible that we have an
24373   // illegal type like v2i128, and so we could allow splitting a volatile store
24374   // in that case if that is important.
24375   if (!Store->isSimple())
24376     return SDValue();
24377 
24378   SDLoc DL(Store);
24379   SDValue Value0, Value1;
24380   std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
24381   unsigned HalfOffset = Value0.getValueType().getStoreSize();
24382   SDValue Ptr0 = Store->getBasePtr();
24383   SDValue Ptr1 =
24384       DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(HalfOffset), DL);
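        // e.g. for a 256-bit store, HalfOffset = 16: the low half is stored at
        // Ptr0 and the high half at Ptr0 + 16 bytes.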
24385   SDValue Ch0 =
24386       DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
24387                    Store->getOriginalAlign(),
24388                    Store->getMemOperand()->getFlags());
24389   SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
24390                              Store->getPointerInfo().getWithOffset(HalfOffset),
24391                              Store->getOriginalAlign(),
24392                              Store->getMemOperand()->getFlags());
24393   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
24394 }
24395 
24396 /// Scalarize a vector store, bitcasting to StoreVT to determine the scalar
24397 /// type.
24398 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
24399                                     SelectionDAG &DAG) {
24400   SDValue StoredVal = Store->getValue();
24401   assert(StoreVT.is128BitVector() &&
24402          StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
24403   StoredVal = DAG.getBitcast(StoreVT, StoredVal);
24404 
24405   // Splitting volatile memory ops is not allowed unless the operation was not
24406   // legal to begin with. We are assuming the input op is legal (this transform
24407   // is only used for targets with AVX).
24408   if (!Store->isSimple())
24409     return SDValue();
24410 
24411   MVT StoreSVT = StoreVT.getScalarType();
24412   unsigned NumElems = StoreVT.getVectorNumElements();
24413   unsigned ScalarSize = StoreSVT.getStoreSize();
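        // e.g. with StoreVT = v4f32 this emits four f32 stores at byte offsets
        // 0, 4, 8 and 12 from the base pointer.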
24414 
24415   SDLoc DL(Store);
24416   SmallVector<SDValue, 4> Stores;
24417   for (unsigned i = 0; i != NumElems; ++i) {
24418     unsigned Offset = i * ScalarSize;
24419     SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
24420                                            TypeSize::getFixed(Offset), DL);
24421     SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
24422                               DAG.getIntPtrConstant(i, DL));
24423     SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
24424                               Store->getPointerInfo().getWithOffset(Offset),
24425                               Store->getOriginalAlign(),
24426                               Store->getMemOperand()->getFlags());
24427     Stores.push_back(Ch);
24428   }
24429   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
24430 }
24431 
24432 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
24433                           SelectionDAG &DAG) {
24434   StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
24435   SDLoc dl(St);
24436   SDValue StoredVal = St->getValue();
24437 
24438   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
24439   if (StoredVal.getValueType().isVector() &&
24440       StoredVal.getValueType().getVectorElementType() == MVT::i1) {
24441     unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
24442     assert(NumElts <= 8 && "Unexpected VT");
24443     assert(!St->isTruncatingStore() && "Expected non-truncating store");
24444     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24445            "Expected AVX512F without AVX512DQI");
24446 
24447     // We must pad with zeros to ensure we store zeroes to any unused bits.
24448     StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24449                             DAG.getUNDEF(MVT::v16i1), StoredVal,
24450                             DAG.getIntPtrConstant(0, dl));
24451     StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
24452     StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
24453     // Make sure we store zeros in the extra bits.
24454     if (NumElts < 8)
24455       StoredVal = DAG.getZeroExtendInReg(
24456           StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
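          // e.g. a v4i1 value <1,0,1,1> is stored as the single byte
          // 0b00001101, with bits 4-7 cleared by the zero-extend-in-reg above.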
24457 
24458     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24459                         St->getPointerInfo(), St->getOriginalAlign(),
24460                         St->getMemOperand()->getFlags());
24461   }
24462 
24463   if (St->isTruncatingStore())
24464     return SDValue();
24465 
24466   // If this is a 256-bit store of concatenated ops, we are better off splitting
24467   // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
24468   // and lets each half execute independently. Some cores would split the op into
24469   // halves anyway, so the concat (vinsertf128) is purely an extra op.
24470   MVT StoreVT = StoredVal.getSimpleValueType();
24471   if (StoreVT.is256BitVector() ||
24472       ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
24473        !Subtarget.hasBWI())) {
24474     if (StoredVal.hasOneUse() && isFreeToSplitVector(StoredVal.getNode(), DAG))
24475       return splitVectorStore(St, DAG);
24476     return SDValue();
24477   }
24478 
24479   if (StoreVT.is32BitVector())
24480     return SDValue();
24481 
24482   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24483   assert(StoreVT.is64BitVector() && "Unexpected VT");
24484   assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
24485              TargetLowering::TypeWidenVector &&
24486          "Unexpected type action!");
24487 
24488   EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
24489   StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
24490                           DAG.getUNDEF(StoreVT));
24491 
24492   if (Subtarget.hasSSE2()) {
24493     // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
24494     // and store it.
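          // e.g. a v8i8 store is widened to v16i8, bitcast to v2i64 (or v2f64
          // on 32-bit targets), and element 0 is stored as a single 64-bit
          // scalar.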
24495     MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
24496     MVT CastVT = MVT::getVectorVT(StVT, 2);
24497     StoredVal = DAG.getBitcast(CastVT, StoredVal);
24498     StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
24499                             DAG.getIntPtrConstant(0, dl));
24500 
24501     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24502                         St->getPointerInfo(), St->getOriginalAlign(),
24503                         St->getMemOperand()->getFlags());
24504   }
24505   assert(Subtarget.hasSSE1() && "Expected SSE");
24506   SDVTList Tys = DAG.getVTList(MVT::Other);
24507   SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
24508   return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
24509                                  St->getMemOperand());
24510 }
24511 
24512 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
24513 // may emit an illegal shuffle but the expansion is still better than scalar
24514 // code. We generate sext/sext_invec for SEXTLOADs if it is available; otherwise
24515 // we'll emit a shuffle and an arithmetic shift.
24516 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
24517 // TODO: It is possible to support ZExt by zeroing the undef values during
24518 // the shuffle phase or after the shuffle.
24519 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
24520                                  SelectionDAG &DAG) {
24521   MVT RegVT = Op.getSimpleValueType();
24522   assert(RegVT.isVector() && "We only custom lower vector loads.");
24523   assert(RegVT.isInteger() &&
24524          "We only custom lower integer vector loads.");
24525 
24526   LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
24527   SDLoc dl(Ld);
24528 
24529   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
24530   if (RegVT.getVectorElementType() == MVT::i1) {
24531     assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
24532     assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
24533     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24534            "Expected AVX512F without AVX512DQI");
24535 
24536     SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
24537                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
24538                                 Ld->getMemOperand()->getFlags());
24539 
24540     // Replace chain users with the new chain.
24541     assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
24542 
24543     SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
24544     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
24545                       DAG.getBitcast(MVT::v16i1, Val),
24546                       DAG.getIntPtrConstant(0, dl));
24547     return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
24548   }
24549 
24550   return SDValue();
24551 }
24552 
24553 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
24554 /// each of which has no other use apart from the AND / OR.
24555 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
24556   Opc = Op.getOpcode();
24557   if (Opc != ISD::OR && Opc != ISD::AND)
24558     return false;
24559   return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
24560           Op.getOperand(0).hasOneUse() &&
24561           Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
24562           Op.getOperand(1).hasOneUse());
24563 }
24564 
24565 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
24566   SDValue Chain = Op.getOperand(0);
24567   SDValue Cond  = Op.getOperand(1);
24568   SDValue Dest  = Op.getOperand(2);
24569   SDLoc dl(Op);
24570 
24571   // Bail out when we don't have native compare instructions.
24572   if (Cond.getOpcode() == ISD::SETCC &&
24573       Cond.getOperand(0).getValueType() != MVT::f128 &&
24574       !isSoftF16(Cond.getOperand(0).getValueType(), Subtarget)) {
24575     SDValue LHS = Cond.getOperand(0);
24576     SDValue RHS = Cond.getOperand(1);
24577     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24578 
24579     // Special case for
24580     // setcc([su]{add,sub,mul}o == 0)
24581     // setcc([su]{add,sub,mul}o != 1)
24582     if (ISD::isOverflowIntrOpRes(LHS) &&
24583         (CC == ISD::SETEQ || CC == ISD::SETNE) &&
24584         (isNullConstant(RHS) || isOneConstant(RHS))) {
24585       SDValue Value, Overflow;
24586       X86::CondCode X86Cond;
24587       std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
24588 
24589       if ((CC == ISD::SETEQ) == isNullConstant(RHS))
24590         X86Cond = X86::GetOppositeBranchCondition(X86Cond);
24591 
24592       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24593       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24594                          Overflow);
24595     }
24596 
24597     if (LHS.getSimpleValueType().isInteger()) {
24598       SDValue CCVal;
24599       SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
24600       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24601                          EFLAGS);
24602     }
24603 
24604     if (CC == ISD::SETOEQ) {
24605       // For FCMP_OEQ, we can emit
24606       // two branches instead of an explicit AND instruction with a
24607       // separate test. However, we only do this if this block doesn't
24608       // have a fall-through edge, because this requires an explicit
24609       // jmp when the condition is false.
24610       if (Op.getNode()->hasOneUse()) {
24611         SDNode *User = *Op.getNode()->use_begin();
24612         // Look for an unconditional branch following this conditional branch.
24613         // We need this because we need to reverse the successors in order
24614         // to implement FCMP_OEQ.
24615         if (User->getOpcode() == ISD::BR) {
24616           SDValue FalseBB = User->getOperand(1);
24617           SDNode *NewBR =
24618             DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
24619           assert(NewBR == User);
24620           (void)NewBR;
24621           Dest = FalseBB;
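                // The unconditional branch now targets the original
                // destination, so branch to the old false block on COND_NE or
                // COND_P (unordered) and reach the destination only on
                // ordered-equal.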
24622 
24623           SDValue Cmp =
24624               DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24625           SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24626           Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
24627                               CCVal, Cmp);
24628           CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24629           return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24630                              Cmp);
24631         }
24632       }
24633     } else if (CC == ISD::SETUNE) {
24634       // For FCMP_UNE, we can emit
24635       // two branches instead of an explicit OR instruction with a
24636       // separate test.
24637       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24638       SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24639       Chain =
24640           DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
24641       CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24642       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24643                          Cmp);
24644     } else {
24645       X86::CondCode X86Cond =
24646           TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
24647       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24648       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24649       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24650                          Cmp);
24651     }
24652   }
24653 
24654   if (ISD::isOverflowIntrOpRes(Cond)) {
24655     SDValue Value, Overflow;
24656     X86::CondCode X86Cond;
24657     std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24658 
24659     SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24660     return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24661                        Overflow);
24662   }
24663 
24664   // Look past the truncate if the high bits are known zero.
24665   if (isTruncWithZeroHighBitsInput(Cond, DAG))
24666     Cond = Cond.getOperand(0);
24667 
24668   EVT CondVT = Cond.getValueType();
24669 
24670   // Add an AND with 1 if we don't already have one.
24671   if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
24672     Cond =
24673         DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
24674 
24675   SDValue LHS = Cond;
24676   SDValue RHS = DAG.getConstant(0, dl, CondVT);
24677 
24678   SDValue CCVal;
24679   SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
24680   return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24681                      EFLAGS);
24682 }
24683 
24684 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
24685 // Calls to _alloca are needed to probe the stack when allocating more than 4K
24686 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
24687 // that the guard pages used by the OS virtual memory manager are allocated in
24688 // the correct sequence.
24689 SDValue
24690 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
24691                                            SelectionDAG &DAG) const {
24692   MachineFunction &MF = DAG.getMachineFunction();
24693   bool SplitStack = MF.shouldSplitStack();
24694   bool EmitStackProbeCall = hasStackProbeSymbol(MF);
24695   bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
24696                SplitStack || EmitStackProbeCall;
24697   SDLoc dl(Op);
24698 
24699   // Get the inputs.
24700   SDNode *Node = Op.getNode();
24701   SDValue Chain = Op.getOperand(0);
24702   SDValue Size  = Op.getOperand(1);
24703   MaybeAlign Alignment(Op.getConstantOperandVal(2));
24704   EVT VT = Node->getValueType(0);
24705 
24706   // Chain the dynamic stack allocation so that it doesn't modify the stack
24707   // pointer when other instructions are using the stack.
24708   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
24709 
24710   bool Is64Bit = Subtarget.is64Bit();
24711   MVT SPTy = getPointerTy(DAG.getDataLayout());
24712 
24713   SDValue Result;
24714   if (!Lower) {
24715     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24716     Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
24717     assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
24718                     " not tell us which reg is the stack pointer!");
24719 
24720     const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24721     const Align StackAlign = TFI.getStackAlign();
24722     if (hasInlineStackProbe(MF)) {
24723       MachineRegisterInfo &MRI = MF.getRegInfo();
24724 
24725       const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24726       Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24727       Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24728       Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
24729                            DAG.getRegister(Vreg, SPTy));
24730     } else {
24731       SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
24732       Chain = SP.getValue(1);
24733       Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
24734     }
24735     if (Alignment && *Alignment > StackAlign)
24736       Result =
24737           DAG.getNode(ISD::AND, dl, VT, Result,
24738                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
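          // e.g. a 32-byte alignment request masks the new stack pointer with
          // ~31, rounding it down to a 32-byte boundary.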
24739     Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
24740   } else if (SplitStack) {
24741     MachineRegisterInfo &MRI = MF.getRegInfo();
24742 
24743     if (Is64Bit) {
24744       // The 64-bit implementation of segmented stacks needs to clobber both r10
24745       // and r11. This makes it impossible to use it along with nested parameters.
24746       const Function &F = MF.getFunction();
24747       for (const auto &A : F.args()) {
24748         if (A.hasNestAttr())
24749           report_fatal_error("Cannot use segmented stacks with functions that "
24750                              "have nested arguments.");
24751       }
24752     }
24753 
24754     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24755     Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24756     Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24757     Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
24758                                 DAG.getRegister(Vreg, SPTy));
24759   } else {
24760     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
24761     Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
24762     MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
24763 
24764     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24765     Register SPReg = RegInfo->getStackRegister();
24766     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
24767     Chain = SP.getValue(1);
24768 
24769     if (Alignment) {
24770       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
24771                        DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
24772       Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
24773     }
24774 
24775     Result = SP;
24776   }
24777 
24778   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
24779 
24780   SDValue Ops[2] = {Result, Chain};
24781   return DAG.getMergeValues(Ops, dl);
24782 }
24783 
24784 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
24785   MachineFunction &MF = DAG.getMachineFunction();
24786   auto PtrVT = getPointerTy(MF.getDataLayout());
24787   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
24788 
24789   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24790   SDLoc DL(Op);
24791 
24792   if (!Subtarget.is64Bit() ||
24793       Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
24794     // vastart just stores the address of the VarArgsFrameIndex slot into the
24795     // memory location argument.
24796     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24797     return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
24798                         MachinePointerInfo(SV));
24799   }
24800 
24801   // __va_list_tag:
24802   //   gp_offset         (0 - 6 * 8)
24803   //   fp_offset         (48 - 48 + 8 * 16)
24804   //   overflow_arg_area (point to parameters coming in memory).
24805   //   reg_save_area
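        // Field offsets within the tag: gp_offset at 0, fp_offset at 4,
        // overflow_arg_area at 8, reg_save_area at 16 (LP64) or 12 (X32).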
24806   SmallVector<SDValue, 8> MemOps;
24807   SDValue FIN = Op.getOperand(1);
24808   // Store gp_offset
24809   SDValue Store = DAG.getStore(
24810       Op.getOperand(0), DL,
24811       DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
24812       MachinePointerInfo(SV));
24813   MemOps.push_back(Store);
24814 
24815   // Store fp_offset
24816   FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(4), DL);
24817   Store = DAG.getStore(
24818       Op.getOperand(0), DL,
24819       DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
24820       MachinePointerInfo(SV, 4));
24821   MemOps.push_back(Store);
24822 
24823   // Store ptr to overflow_arg_area
24824   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
24825   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24826   Store =
24827       DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
24828   MemOps.push_back(Store);
24829 
24830   // Store ptr to reg_save_area.
24831   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
24832       Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
24833   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
24834   Store = DAG.getStore(
24835       Op.getOperand(0), DL, RSFIN, FIN,
24836       MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
24837   MemOps.push_back(Store);
24838   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
24839 }
24840 
24841 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
24842   assert(Subtarget.is64Bit() &&
24843          "LowerVAARG only handles 64-bit va_arg!");
24844   assert(Op.getNumOperands() == 4);
24845 
24846   MachineFunction &MF = DAG.getMachineFunction();
24847   if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
24848     // The Win64 ABI uses char* instead of a structure.
24849     return DAG.expandVAArg(Op.getNode());
24850 
24851   SDValue Chain = Op.getOperand(0);
24852   SDValue SrcPtr = Op.getOperand(1);
24853   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24854   unsigned Align = Op.getConstantOperandVal(3);
24855   SDLoc dl(Op);
24856 
24857   EVT ArgVT = Op.getNode()->getValueType(0);
24858   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
24859   uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
24860   uint8_t ArgMode;
24861 
24862   // Decide which area this value should be read from.
24863   // TODO: Implement the AMD64 ABI in its entirety. This simple
24864   // selection mechanism works only for the basic types.
24865   assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
24866   if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
24867     ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
24868   } else {
24869     assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
24870            "Unhandled argument type in LowerVAARG");
24871     ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
24872   }
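        // e.g. va_arg of a double selects ArgMode = 2 (fp_offset / XMM save
        // area), while an i32 or i64 selects ArgMode = 1 (gp_offset / GPR save
        // area).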
24873 
24874   if (ArgMode == 2) {
24875     // Make sure using fp_offset makes sense.
24876     assert(!Subtarget.useSoftFloat() &&
24877            !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
24878            Subtarget.hasSSE1());
24879   }
24880 
24881   // Insert VAARG node into the DAG
24882   // VAARG returns two values: Variable Argument Address, Chain
24883   SDValue InstOps[] = {Chain, SrcPtr,
24884                        DAG.getTargetConstant(ArgSize, dl, MVT::i32),
24885                        DAG.getTargetConstant(ArgMode, dl, MVT::i8),
24886                        DAG.getTargetConstant(Align, dl, MVT::i32)};
24887   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
24888   SDValue VAARG = DAG.getMemIntrinsicNode(
24889       Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
24890       VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
24891       /*Alignment=*/std::nullopt,
24892       MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
24893   Chain = VAARG.getValue(1);
24894 
24895   // Load the next argument and return it
24896   return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
24897 }
24898 
24899 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
24900                            SelectionDAG &DAG) {
24901   // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
24902   // where a va_list is still an i8*.
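        // That struct is 24 bytes on LP64 (4 + 4 + 8 + 8) and 16 bytes on X32,
        // matching the memcpy sizes below.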
24903   assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
24904   if (Subtarget.isCallingConvWin64(
24905         DAG.getMachineFunction().getFunction().getCallingConv()))
24906     // Probably a Win64 va_copy.
24907     return DAG.expandVACopy(Op.getNode());
24908 
24909   SDValue Chain = Op.getOperand(0);
24910   SDValue DstPtr = Op.getOperand(1);
24911   SDValue SrcPtr = Op.getOperand(2);
24912   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
24913   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24914   SDLoc DL(Op);
24915 
24916   return DAG.getMemcpy(
24917       Chain, DL, DstPtr, SrcPtr,
24918       DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
24919       Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
24920       false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
24921 }
24922 
24923 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
24924 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
24925   switch (Opc) {
24926   case ISD::SHL:
24927   case X86ISD::VSHL:
24928   case X86ISD::VSHLI:
24929     return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
24930   case ISD::SRL:
24931   case X86ISD::VSRL:
24932   case X86ISD::VSRLI:
24933     return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
24934   case ISD::SRA:
24935   case X86ISD::VSRA:
24936   case X86ISD::VSRAI:
24937     return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
24938   }
24939   llvm_unreachable("Unknown target vector shift node");
24940 }
24941 
24942 /// Handle vector element shifts where the shift amount is a constant.
24943 /// Takes immediate version of shift as input.
24944 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
24945                                           SDValue SrcOp, uint64_t ShiftAmt,
24946                                           SelectionDAG &DAG) {
24947   MVT ElementType = VT.getVectorElementType();
24948 
24949   // Bitcast the source vector to the output type; this is mainly necessary for
24950   // vXi8/vXi64 shifts.
24951   if (VT != SrcOp.getSimpleValueType())
24952     SrcOp = DAG.getBitcast(VT, SrcOp);
24953 
24954   // Fold this packed shift into its first operand if ShiftAmt is 0.
24955   if (ShiftAmt == 0)
24956     return SrcOp;
24957 
24958   // Check for ShiftAmt >= element width
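        // (a logical shift of an iN element by >= N bits yields 0, while an
        //  arithmetic shift is clamped to N - 1, which replicates the sign bit)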
24959   if (ShiftAmt >= ElementType.getSizeInBits()) {
24960     if (Opc == X86ISD::VSRAI)
24961       ShiftAmt = ElementType.getSizeInBits() - 1;
24962     else
24963       return DAG.getConstant(0, dl, VT);
24964   }
24965 
24966   assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
24967          && "Unknown target vector shift-by-constant node");
24968 
24969   // Fold this packed vector shift into a build vector if SrcOp is a
24970   // vector of Constants or UNDEFs.
24971   if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
24972     unsigned ShiftOpc;
24973     switch (Opc) {
24974     default: llvm_unreachable("Unknown opcode!");
24975     case X86ISD::VSHLI:
24976       ShiftOpc = ISD::SHL;
24977       break;
24978     case X86ISD::VSRLI:
24979       ShiftOpc = ISD::SRL;
24980       break;
24981     case X86ISD::VSRAI:
24982       ShiftOpc = ISD::SRA;
24983       break;
24984     }
24985 
24986     SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
24987     if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
24988       return C;
24989   }
24990 
24991   return DAG.getNode(Opc, dl, VT, SrcOp,
24992                      DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
24993 }
24994 
24995 /// Handle vector element shifts by a splat shift amount
24996 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
24997                                    SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
24998                                    const X86Subtarget &Subtarget,
24999                                    SelectionDAG &DAG) {
25000   MVT AmtVT = ShAmt.getSimpleValueType();
25001   assert(AmtVT.isVector() && "Vector shift type mismatch");
25002   assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
25003          "Illegal vector splat index");
25004 
25005   // Move the splat element to the bottom element.
25006   if (ShAmtIdx != 0) {
25007     SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
25008     Mask[0] = ShAmtIdx;
25009     ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
25010   }
25011 
25012   // Peek through any zext node if we can get back to a 128-bit source.
25013   if (AmtVT.getScalarSizeInBits() == 64 &&
25014       (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
25015        ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
25016       ShAmt.getOperand(0).getValueType().isSimple() &&
25017       ShAmt.getOperand(0).getValueType().is128BitVector()) {
25018     ShAmt = ShAmt.getOperand(0);
25019     AmtVT = ShAmt.getSimpleValueType();
25020   }
25021 
25022   // See if we can mask off the upper elements using the existing source node.
25023   // The shift uses the entire lower 64-bits of the amount vector, so no need to
25024   // do this for vXi64 types.
25025   bool IsMasked = false;
25026   if (AmtVT.getScalarSizeInBits() < 64) {
25027     if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
25028         ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
25029       // If the shift amount has come from a scalar, then zero-extend the scalar
25030       // before moving to the vector.
25031       ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
25032       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25033       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
25034       AmtVT = MVT::v4i32;
25035       IsMasked = true;
25036     } else if (ShAmt.getOpcode() == ISD::AND) {
25037       // See if the shift amount is already masked (e.g. for rotation modulo),
25038       // then we can zero-extend it by setting all the other mask elements to
25039       // zero.
25040       SmallVector<SDValue> MaskElts(
25041           AmtVT.getVectorNumElements(),
25042           DAG.getConstant(0, dl, AmtVT.getScalarType()));
25043       MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
25044       SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
25045       if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
25046                                              {ShAmt.getOperand(1), Mask}))) {
25047         ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
25048         IsMasked = true;
25049       }
25050     }
25051   }
25052 
25053   // Extract if the shift amount vector is larger than 128-bits.
25054   if (AmtVT.getSizeInBits() > 128) {
25055     ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
25056     AmtVT = ShAmt.getSimpleValueType();
25057   }
25058 
25059   // Zero-extend bottom element to v2i64 vector type, either by extension or
25060   // shuffle masking.
25061   if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
25062     if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
25063                                 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
25064       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
25065     } else if (Subtarget.hasSSE41()) {
25066       ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
25067                           MVT::v2i64, ShAmt);
25068     } else {
25069       SDValue ByteShift = DAG.getTargetConstant(
25070           (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
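            // e.g. for a v4i32 amount this is (128 - 32) / 8 = 12 bytes:
            // shifting left and then right by 12 bytes keeps only the low
            // 32-bit element and zeroes the rest.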
25071       ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
25072       ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25073                           ByteShift);
25074       ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25075                           ByteShift);
25076     }
25077   }
25078 
25079   // Change opcode to non-immediate version.
25080   Opc = getTargetVShiftUniformOpcode(Opc, true);
25081 
25082   // The return type has to be a 128-bit type with the same element
25083   // type as the input type.
25084   MVT EltVT = VT.getVectorElementType();
25085   MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
25086 
25087   ShAmt = DAG.getBitcast(ShVT, ShAmt);
25088   return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
25089 }
25090 
25091 /// Return Mask with the necessary casting or extending
25092 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
25093 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
25094                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
25095                            const SDLoc &dl) {
25096 
25097   if (isAllOnesConstant(Mask))
25098     return DAG.getConstant(1, dl, MaskVT);
25099   if (X86::isZeroNode(Mask))
25100     return DAG.getConstant(0, dl, MaskVT);
25101 
25102   assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
25103 
25104   if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
25105     assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
25106     assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
25107     // Bitcasting an i64 is illegal in 32-bit mode, so split the mask in two.
25108     SDValue Lo, Hi;
25109     std::tie(Lo, Hi) = DAG.SplitScalar(Mask, dl, MVT::i32, MVT::i32);
25110     Lo = DAG.getBitcast(MVT::v32i1, Lo);
25111     Hi = DAG.getBitcast(MVT::v32i1, Hi);
25112     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
25113   } else {
25114     MVT BitcastVT = MVT::getVectorVT(MVT::i1,
25115                                      Mask.getSimpleValueType().getSizeInBits());
25116     // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements
25117     // are extracted by EXTRACT_SUBVECTOR.
25118     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
25119                        DAG.getBitcast(BitcastVT, Mask),
25120                        DAG.getIntPtrConstant(0, dl));
25121   }
25122 }
25123 
25124 /// Return (and \p Op, \p Mask) for compare instructions or
25125 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
25126 /// necessary casting or extending for \p Mask when lowering masking intrinsics
25127 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
25128                                     SDValue PreservedSrc,
25129                                     const X86Subtarget &Subtarget,
25130                                     SelectionDAG &DAG) {
25131   MVT VT = Op.getSimpleValueType();
25132   MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
25133   unsigned OpcodeSelect = ISD::VSELECT;
25134   SDLoc dl(Op);
25135 
25136   if (isAllOnesConstant(Mask))
25137     return Op;
25138 
25139   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25140 
25141   if (PreservedSrc.isUndef())
25142     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25143   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
25144 }
25145 
25146 /// Creates an SDNode for a predicated scalar operation.
25147 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
25148 /// The mask comes in as MVT::i8 and should be transformed
25149 /// to MVT::v1i1 while lowering masking intrinsics.
25150 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
25151 /// "X86select" instead of "vselect". We just can't create the "vselect" node
25152 /// for a scalar instruction.
25153 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
25154                                     SDValue PreservedSrc,
25155                                     const X86Subtarget &Subtarget,
25156                                     SelectionDAG &DAG) {
25157 
25158   if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
25159     if (MaskConst->getZExtValue() & 0x1)
25160       return Op;
25161 
25162   MVT VT = Op.getSimpleValueType();
25163   SDLoc dl(Op);
25164 
25165   assert(Mask.getValueType() == MVT::i8 && "Unexpect type");
25166   SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
25167                               DAG.getBitcast(MVT::v8i1, Mask),
25168                               DAG.getIntPtrConstant(0, dl));
25169   if (Op.getOpcode() == X86ISD::FSETCCM ||
25170       Op.getOpcode() == X86ISD::FSETCCM_SAE ||
25171       Op.getOpcode() == X86ISD::VFPCLASSS)
25172     return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
25173 
25174   if (PreservedSrc.isUndef())
25175     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25176   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
25177 }
25178 
25179 static int getSEHRegistrationNodeSize(const Function *Fn) {
25180   if (!Fn->hasPersonalityFn())
25181     report_fatal_error(
25182         "querying registration node size for function without personality");
25183   // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
25184   // WinEHStatePass for the full struct definition.
25185   switch (classifyEHPersonality(Fn->getPersonalityFn())) {
25186   case EHPersonality::MSVC_X86SEH: return 24;
25187   case EHPersonality::MSVC_CXX: return 16;
25188   default: break;
25189   }
25190   report_fatal_error(
25191       "can only recover FP for 32-bit MSVC EH personality functions");
25192 }
25193 
25194 /// When the MSVC runtime transfers control to us, either to an outlined
25195 /// function or when returning to a parent frame after catching an exception, we
25196 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
25197 /// Here's the math:
25198 ///   RegNodeBase = EntryEBP - RegNodeSize
25199 ///   ParentFP = RegNodeBase - ParentFrameOffset
25200 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
25201 /// subtracting the offset (negative on x86) takes us back to the parent FP.
25202 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
25203                                    SDValue EntryEBP) {
25204   MachineFunction &MF = DAG.getMachineFunction();
25205   SDLoc dl;
25206 
25207   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25208   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
25209 
25210   // It's possible that the parent function no longer has a personality function
25211   // if the exceptional code was optimized away, in which case we just return
25212   // the incoming EBP.
25213   if (!Fn->hasPersonalityFn())
25214     return EntryEBP;
25215 
25216   // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
25217   // registration, or the .set_setframe offset.
25218   MCSymbol *OffsetSym =
25219       MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
25220           GlobalValue::dropLLVMManglingEscape(Fn->getName()));
25221   SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
25222   SDValue ParentFrameOffset =
25223       DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
25224 
25225   // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
25226   // prologue to RBP in the parent function.
25227   const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
25228   if (Subtarget.is64Bit())
25229     return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
25230 
25231   int RegNodeSize = getSEHRegistrationNodeSize(Fn);
25232   // RegNodeBase = EntryEBP - RegNodeSize
25233   // ParentFP = RegNodeBase - ParentFrameOffset
25234   SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
25235                                     DAG.getConstant(RegNodeSize, dl, PtrVT));
25236   return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
25237 }
25238 
25239 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
25240                                                    SelectionDAG &DAG) const {
25241   // Helper to detect if the operand is CUR_DIRECTION rounding mode.
25242   auto isRoundModeCurDirection = [](SDValue Rnd) {
25243     if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
25244       return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
25245 
25246     return false;
25247   };
25248   auto isRoundModeSAE = [](SDValue Rnd) {
25249     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25250       unsigned RC = C->getZExtValue();
25251       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25252         // Clear the NO_EXC bit and check remaining bits.
25253         RC ^= X86::STATIC_ROUNDING::NO_EXC;
25254         // As a convenience we allow either no other bits set or, explicitly,
25255         // the current-direction rounding mode.
25256         return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
25257       }
25258     }
25259 
25260     return false;
25261   };
25262   auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
25263     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25264       RC = C->getZExtValue();
25265       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25266         // Clear the NO_EXC bit and check remaining bits.
25267         RC ^= X86::STATIC_ROUNDING::NO_EXC;
25268         return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
25269                RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
25270                RC == X86::STATIC_ROUNDING::TO_POS_INF ||
25271                RC == X86::STATIC_ROUNDING::TO_ZERO;
25272       }
25273     }
25274 
25275     return false;
25276   };
25277 
25278   SDLoc dl(Op);
25279   unsigned IntNo = Op.getConstantOperandVal(0);
25280   MVT VT = Op.getSimpleValueType();
25281   const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
25282 
25283   // Propagate flags from original node to transformed node(s).
25284   SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
25285 
25286   if (IntrData) {
25287     switch(IntrData->Type) {
25288     case INTR_TYPE_1OP: {
25289       // We specify 2 possible opcodes for intrinsics with rounding modes.
25290       // First, we check if the intrinsic may have non-default rounding mode,
25291       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25292       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25293       if (IntrWithRoundingModeOpcode != 0) {
25294         SDValue Rnd = Op.getOperand(2);
25295         unsigned RC = 0;
25296         if (isRoundModeSAEToX(Rnd, RC))
25297           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25298                              Op.getOperand(1),
25299                              DAG.getTargetConstant(RC, dl, MVT::i32));
25300         if (!isRoundModeCurDirection(Rnd))
25301           return SDValue();
25302       }
25303       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25304                          Op.getOperand(1));
25305     }
25306     case INTR_TYPE_1OP_SAE: {
25307       SDValue Sae = Op.getOperand(2);
25308 
25309       unsigned Opc;
25310       if (isRoundModeCurDirection(Sae))
25311         Opc = IntrData->Opc0;
25312       else if (isRoundModeSAE(Sae))
25313         Opc = IntrData->Opc1;
25314       else
25315         return SDValue();
25316 
25317       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
25318     }
25319     case INTR_TYPE_2OP: {
25320       SDValue Src2 = Op.getOperand(2);
25321 
25322       // We specify 2 possible opcodes for intrinsics with rounding modes.
25323       // First, we check if the intrinsic may have non-default rounding mode,
25324       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25325       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25326       if (IntrWithRoundingModeOpcode != 0) {
25327         SDValue Rnd = Op.getOperand(3);
25328         unsigned RC = 0;
25329         if (isRoundModeSAEToX(Rnd, RC))
25330           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25331                              Op.getOperand(1), Src2,
25332                              DAG.getTargetConstant(RC, dl, MVT::i32));
25333         if (!isRoundModeCurDirection(Rnd))
25334           return SDValue();
25335       }
25336 
25337       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25338                          Op.getOperand(1), Src2);
25339     }
25340     case INTR_TYPE_2OP_SAE: {
25341       SDValue Sae = Op.getOperand(3);
25342 
25343       unsigned Opc;
25344       if (isRoundModeCurDirection(Sae))
25345         Opc = IntrData->Opc0;
25346       else if (isRoundModeSAE(Sae))
25347         Opc = IntrData->Opc1;
25348       else
25349         return SDValue();
25350 
25351       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
25352                          Op.getOperand(2));
25353     }
25354     case INTR_TYPE_3OP:
25355     case INTR_TYPE_3OP_IMM8: {
25356       SDValue Src1 = Op.getOperand(1);
25357       SDValue Src2 = Op.getOperand(2);
25358       SDValue Src3 = Op.getOperand(3);
25359 
25360       if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
25361           Src3.getValueType() != MVT::i8) {
25362         Src3 = DAG.getTargetConstant(
25363             cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
25364       }
25365 
25366       // We specify 2 possible opcodes for intrinsics with rounding modes.
25367       // First, we check if the intrinsic may have non-default rounding mode,
25368       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25369       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25370       if (IntrWithRoundingModeOpcode != 0) {
25371         SDValue Rnd = Op.getOperand(4);
25372         unsigned RC = 0;
25373         if (isRoundModeSAEToX(Rnd, RC))
25374           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25375                              Src1, Src2, Src3,
25376                              DAG.getTargetConstant(RC, dl, MVT::i32));
25377         if (!isRoundModeCurDirection(Rnd))
25378           return SDValue();
25379       }
25380 
25381       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25382                          {Src1, Src2, Src3});
25383     }
25384     case INTR_TYPE_4OP_IMM8: {
25385       assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
25386       SDValue Src4 = Op.getOperand(4);
25387       if (Src4.getValueType() != MVT::i8) {
25388         Src4 = DAG.getTargetConstant(
25389             cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
25390       }
25391 
25392       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25393                          Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
25394                          Src4);
25395     }
25396     case INTR_TYPE_1OP_MASK: {
25397       SDValue Src = Op.getOperand(1);
25398       SDValue PassThru = Op.getOperand(2);
25399       SDValue Mask = Op.getOperand(3);
25400       // We add rounding mode to the Node when
25401       //   - RC Opcode is specified and
25402       //   - RC is not "current direction".
25403       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25404       if (IntrWithRoundingModeOpcode != 0) {
25405         SDValue Rnd = Op.getOperand(4);
25406         unsigned RC = 0;
25407         if (isRoundModeSAEToX(Rnd, RC))
25408           return getVectorMaskingNode(
25409               DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25410                           Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
25411               Mask, PassThru, Subtarget, DAG);
25412         if (!isRoundModeCurDirection(Rnd))
25413           return SDValue();
25414       }
25415       return getVectorMaskingNode(
25416           DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
25417           Subtarget, DAG);
25418     }
25419     case INTR_TYPE_1OP_MASK_SAE: {
25420       SDValue Src = Op.getOperand(1);
25421       SDValue PassThru = Op.getOperand(2);
25422       SDValue Mask = Op.getOperand(3);
25423       SDValue Rnd = Op.getOperand(4);
25424 
25425       unsigned Opc;
25426       if (isRoundModeCurDirection(Rnd))
25427         Opc = IntrData->Opc0;
25428       else if (isRoundModeSAE(Rnd))
25429         Opc = IntrData->Opc1;
25430       else
25431         return SDValue();
25432 
25433       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
25434                                   Subtarget, DAG);
25435     }
25436     case INTR_TYPE_SCALAR_MASK: {
25437       SDValue Src1 = Op.getOperand(1);
25438       SDValue Src2 = Op.getOperand(2);
25439       SDValue passThru = Op.getOperand(3);
25440       SDValue Mask = Op.getOperand(4);
25441       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25442       // There are 2 kinds of intrinsics in this group:
25443       // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands.
25444       // (2) With rounding mode and sae - 7 operands.
25445       bool HasRounding = IntrWithRoundingModeOpcode != 0;
25446       if (Op.getNumOperands() == (5U + HasRounding)) {
25447         if (HasRounding) {
25448           SDValue Rnd = Op.getOperand(5);
25449           unsigned RC = 0;
25450           if (isRoundModeSAEToX(Rnd, RC))
25451             return getScalarMaskingNode(
25452                 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
25453                             DAG.getTargetConstant(RC, dl, MVT::i32)),
25454                 Mask, passThru, Subtarget, DAG);
25455           if (!isRoundModeCurDirection(Rnd))
25456             return SDValue();
25457         }
25458         return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
25459                                                 Src2),
25460                                     Mask, passThru, Subtarget, DAG);
25461       }
25462 
25463       assert(Op.getNumOperands() == (6U + HasRounding) &&
25464              "Unexpected intrinsic form");
25465       SDValue RoundingMode = Op.getOperand(5);
25466       unsigned Opc = IntrData->Opc0;
25467       if (HasRounding) {
25468         SDValue Sae = Op.getOperand(6);
25469         if (isRoundModeSAE(Sae))
25470           Opc = IntrWithRoundingModeOpcode;
25471         else if (!isRoundModeCurDirection(Sae))
25472           return SDValue();
25473       }
25474       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
25475                                               Src2, RoundingMode),
25476                                   Mask, passThru, Subtarget, DAG);
25477     }
25478     case INTR_TYPE_SCALAR_MASK_RND: {
25479       SDValue Src1 = Op.getOperand(1);
25480       SDValue Src2 = Op.getOperand(2);
25481       SDValue passThru = Op.getOperand(3);
25482       SDValue Mask = Op.getOperand(4);
25483       SDValue Rnd = Op.getOperand(5);
25484 
25485       SDValue NewOp;
25486       unsigned RC = 0;
25487       if (isRoundModeCurDirection(Rnd))
25488         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25489       else if (isRoundModeSAEToX(Rnd, RC))
25490         NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25491                             DAG.getTargetConstant(RC, dl, MVT::i32));
25492       else
25493         return SDValue();
25494 
25495       return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
25496     }
25497     case INTR_TYPE_SCALAR_MASK_SAE: {
25498       SDValue Src1 = Op.getOperand(1);
25499       SDValue Src2 = Op.getOperand(2);
25500       SDValue passThru = Op.getOperand(3);
25501       SDValue Mask = Op.getOperand(4);
25502       SDValue Sae = Op.getOperand(5);
25503       unsigned Opc;
25504       if (isRoundModeCurDirection(Sae))
25505         Opc = IntrData->Opc0;
25506       else if (isRoundModeSAE(Sae))
25507         Opc = IntrData->Opc1;
25508       else
25509         return SDValue();
25510 
25511       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25512                                   Mask, passThru, Subtarget, DAG);
25513     }
25514     case INTR_TYPE_2OP_MASK: {
25515       SDValue Src1 = Op.getOperand(1);
25516       SDValue Src2 = Op.getOperand(2);
25517       SDValue PassThru = Op.getOperand(3);
25518       SDValue Mask = Op.getOperand(4);
25519       SDValue NewOp;
25520       if (IntrData->Opc1 != 0) {
25521         SDValue Rnd = Op.getOperand(5);
25522         unsigned RC = 0;
25523         if (isRoundModeSAEToX(Rnd, RC))
25524           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25525                               DAG.getTargetConstant(RC, dl, MVT::i32));
25526         else if (!isRoundModeCurDirection(Rnd))
25527           return SDValue();
25528       }
25529       if (!NewOp)
25530         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25531       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25532     }
25533     case INTR_TYPE_2OP_MASK_SAE: {
25534       SDValue Src1 = Op.getOperand(1);
25535       SDValue Src2 = Op.getOperand(2);
25536       SDValue PassThru = Op.getOperand(3);
25537       SDValue Mask = Op.getOperand(4);
25538 
25539       unsigned Opc = IntrData->Opc0;
25540       if (IntrData->Opc1 != 0) {
25541         SDValue Sae = Op.getOperand(5);
25542         if (isRoundModeSAE(Sae))
25543           Opc = IntrData->Opc1;
25544         else if (!isRoundModeCurDirection(Sae))
25545           return SDValue();
25546       }
25547 
25548       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25549                                   Mask, PassThru, Subtarget, DAG);
25550     }
25551     case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
25552       SDValue Src1 = Op.getOperand(1);
25553       SDValue Src2 = Op.getOperand(2);
25554       SDValue Src3 = Op.getOperand(3);
25555       SDValue PassThru = Op.getOperand(4);
25556       SDValue Mask = Op.getOperand(5);
25557       SDValue Sae = Op.getOperand(6);
25558       unsigned Opc;
25559       if (isRoundModeCurDirection(Sae))
25560         Opc = IntrData->Opc0;
25561       else if (isRoundModeSAE(Sae))
25562         Opc = IntrData->Opc1;
25563       else
25564         return SDValue();
25565 
25566       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25567                                   Mask, PassThru, Subtarget, DAG);
25568     }
25569     case INTR_TYPE_3OP_MASK_SAE: {
25570       SDValue Src1 = Op.getOperand(1);
25571       SDValue Src2 = Op.getOperand(2);
25572       SDValue Src3 = Op.getOperand(3);
25573       SDValue PassThru = Op.getOperand(4);
25574       SDValue Mask = Op.getOperand(5);
25575 
25576       unsigned Opc = IntrData->Opc0;
25577       if (IntrData->Opc1 != 0) {
25578         SDValue Sae = Op.getOperand(6);
25579         if (isRoundModeSAE(Sae))
25580           Opc = IntrData->Opc1;
25581         else if (!isRoundModeCurDirection(Sae))
25582           return SDValue();
25583       }
25584       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25585                                   Mask, PassThru, Subtarget, DAG);
25586     }
25587     case BLENDV: {
25588       SDValue Src1 = Op.getOperand(1);
25589       SDValue Src2 = Op.getOperand(2);
25590       SDValue Src3 = Op.getOperand(3);
25591 
25592       EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
25593       Src3 = DAG.getBitcast(MaskVT, Src3);
25594 
25595       // Reverse the operands to match VSELECT order.
25596       return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
25597     }
25598     case VPERM_2OP: {
25599       SDValue Src1 = Op.getOperand(1);
25600       SDValue Src2 = Op.getOperand(2);
25601 
25602       // Swap Src1 and Src2 in the node creation
25603       return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
25604     }
25605     case CFMA_OP_MASKZ:
25606     case CFMA_OP_MASK: {
25607       SDValue Src1 = Op.getOperand(1);
25608       SDValue Src2 = Op.getOperand(2);
25609       SDValue Src3 = Op.getOperand(3);
25610       SDValue Mask = Op.getOperand(4);
25611       MVT VT = Op.getSimpleValueType();
25612 
25613       SDValue PassThru = Src3;
25614       if (IntrData->Type == CFMA_OP_MASKZ)
25615         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25616 
25617       // We add rounding mode to the Node when
25618       //   - RC Opcode is specified and
25619       //   - RC is not "current direction".
25620       SDValue NewOp;
25621       if (IntrData->Opc1 != 0) {
25622         SDValue Rnd = Op.getOperand(5);
25623         unsigned RC = 0;
25624         if (isRoundModeSAEToX(Rnd, RC))
25625           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
25626                               DAG.getTargetConstant(RC, dl, MVT::i32));
25627         else if (!isRoundModeCurDirection(Rnd))
25628           return SDValue();
25629       }
25630       if (!NewOp)
25631         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
25632       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25633     }
25634     case IFMA_OP:
25635       // NOTE: We need to swizzle the operands to pass the multiply operands
25636       // first.
25637       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25638                          Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
25639     case FPCLASSS: {
25640       SDValue Src1 = Op.getOperand(1);
25641       SDValue Imm = Op.getOperand(2);
25642       SDValue Mask = Op.getOperand(3);
25643       SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
25644       SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
25645                                                  Subtarget, DAG);
25646       // Need to fill with zeros to ensure the bitcast will produce zeroes
25647       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25648       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25649                                 DAG.getConstant(0, dl, MVT::v8i1),
25650                                 FPclassMask, DAG.getIntPtrConstant(0, dl));
25651       return DAG.getBitcast(MVT::i8, Ins);
25652     }
25653 
25654     case CMP_MASK_CC: {
25655       MVT MaskVT = Op.getSimpleValueType();
25656       SDValue CC = Op.getOperand(3);
25657       SDValue Mask = Op.getOperand(4);
25658       // We specify 2 possible opcodes for intrinsics with rounding modes.
25659       // First, we check if the intrinsic may have non-default rounding mode,
25660       // First, we check if the intrinsic may have a non-default rounding mode
25661       if (IntrData->Opc1 != 0) {
25662         SDValue Sae = Op.getOperand(5);
25663         if (isRoundModeSAE(Sae))
25664           return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
25665                              Op.getOperand(2), CC, Mask, Sae);
25666         if (!isRoundModeCurDirection(Sae))
25667           return SDValue();
25668       }
25669       // Default rounding mode.
25670       return DAG.getNode(IntrData->Opc0, dl, MaskVT,
25671                          {Op.getOperand(1), Op.getOperand(2), CC, Mask});
25672     }
25673     case CMP_MASK_SCALAR_CC: {
25674       SDValue Src1 = Op.getOperand(1);
25675       SDValue Src2 = Op.getOperand(2);
25676       SDValue CC = Op.getOperand(3);
25677       SDValue Mask = Op.getOperand(4);
25678 
25679       SDValue Cmp;
25680       if (IntrData->Opc1 != 0) {
25681         SDValue Sae = Op.getOperand(5);
25682         if (isRoundModeSAE(Sae))
25683           Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
25684         else if (!isRoundModeCurDirection(Sae))
25685           return SDValue();
25686       }
25687       // Default rounding mode.
25688       if (!Cmp.getNode())
25689         Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
25690 
25691       SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
25692                                              Subtarget, DAG);
25693       // Need to fill with zeros to ensure the bitcast will produce zeroes
25694       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25695       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25696                                 DAG.getConstant(0, dl, MVT::v8i1),
25697                                 CmpMask, DAG.getIntPtrConstant(0, dl));
25698       return DAG.getBitcast(MVT::i8, Ins);
25699     }
25700     case COMI: { // Comparison intrinsics
25701       ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
25702       SDValue LHS = Op.getOperand(1);
25703       SDValue RHS = Op.getOperand(2);
25704       // Some conditions require the operands to be swapped.
25705       if (CC == ISD::SETLT || CC == ISD::SETLE)
25706         std::swap(LHS, RHS);
25707 
25708       SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
25709       SDValue SetCC;
25710       switch (CC) {
25711       case ISD::SETEQ: { // (ZF = 0 and PF = 0)
25712         SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
25713         SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
25714         SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
25715         break;
25716       }
25717       case ISD::SETNE: { // (ZF = 1 or PF = 1)
25718         SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
25719         SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
25720         SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
25721         break;
25722       }
25723       case ISD::SETGT: // (CF = 0 and ZF = 0)
25724       case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
25725         SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
25726         break;
25727       }
25728       case ISD::SETGE: // CF = 0
25729       case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
25730         SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
25731         break;
25732       default:
25733         llvm_unreachable("Unexpected illegal condition!");
25734       }
25735       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
25736     }
25737     case COMI_RM: { // Comparison intrinsics with Sae
25738       SDValue LHS = Op.getOperand(1);
25739       SDValue RHS = Op.getOperand(2);
25740       unsigned CondVal = Op.getConstantOperandVal(3);
25741       SDValue Sae = Op.getOperand(4);
25742 
25743       SDValue FCmp;
25744       if (isRoundModeCurDirection(Sae))
25745         FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
25746                            DAG.getTargetConstant(CondVal, dl, MVT::i8));
25747       else if (isRoundModeSAE(Sae))
25748         FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
25749                            DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
25750       else
25751         return SDValue();
25752       // Need to fill with zeros to ensure the bitcast will produce zeroes
25753       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25754       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
25755                                 DAG.getConstant(0, dl, MVT::v16i1),
25756                                 FCmp, DAG.getIntPtrConstant(0, dl));
25757       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
25758                          DAG.getBitcast(MVT::i16, Ins));
25759     }
25760     case VSHIFT: {
25761       SDValue SrcOp = Op.getOperand(1);
25762       SDValue ShAmt = Op.getOperand(2);
25763       assert(ShAmt.getValueType() == MVT::i32 &&
25764              "Unexpected VSHIFT amount type");
25765 
25766       // Catch shift-by-constant.
25767       if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
25768         return getTargetVShiftByConstNode(IntrData->Opc0, dl,
25769                                           Op.getSimpleValueType(), SrcOp,
25770                                           CShAmt->getZExtValue(), DAG);
25771 
25772       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25773       return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
25774                                  SrcOp, ShAmt, 0, Subtarget, DAG);
25775     }
25776     case COMPRESS_EXPAND_IN_REG: {
25777       SDValue Mask = Op.getOperand(3);
25778       SDValue DataToCompress = Op.getOperand(1);
25779       SDValue PassThru = Op.getOperand(2);
25780       if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
25781         return Op.getOperand(1);
25782 
25783       // Avoid false dependency.
25784       if (PassThru.isUndef())
25785         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25786 
25787       return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
25788                          Mask);
25789     }
25790     case FIXUPIMM:
25791     case FIXUPIMM_MASKZ: {
25792       SDValue Src1 = Op.getOperand(1);
25793       SDValue Src2 = Op.getOperand(2);
25794       SDValue Src3 = Op.getOperand(3);
25795       SDValue Imm = Op.getOperand(4);
25796       SDValue Mask = Op.getOperand(5);
25797       SDValue Passthru = (IntrData->Type == FIXUPIMM)
25798                              ? Src1
25799                              : getZeroVector(VT, Subtarget, DAG, dl);
25800 
25801       unsigned Opc = IntrData->Opc0;
25802       if (IntrData->Opc1 != 0) {
25803         SDValue Sae = Op.getOperand(6);
25804         if (isRoundModeSAE(Sae))
25805           Opc = IntrData->Opc1;
25806         else if (!isRoundModeCurDirection(Sae))
25807           return SDValue();
25808       }
25809 
25810       SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
25811 
25812       if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
25813         return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25814 
25815       return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25816     }
25817     case ROUNDP: {
25818       assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
25819       // Clear the upper bits of the rounding immediate so that the legacy
25820       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
25821       auto Round = cast<ConstantSDNode>(Op.getOperand(2));
25822       SDValue RoundingMode =
25823           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25824       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25825                          Op.getOperand(1), RoundingMode);
25826     }
25827     case ROUNDS: {
25828       assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
25829       // Clear the upper bits of the rounding immediate so that the legacy
25830       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
25831       auto Round = cast<ConstantSDNode>(Op.getOperand(3));
25832       SDValue RoundingMode =
25833           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25834       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25835                          Op.getOperand(1), Op.getOperand(2), RoundingMode);
25836     }
25837     case BEXTRI: {
25838       assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
25839 
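      // Only the lower 16 bits of the immediate are used as the BEXTR control.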
25840       uint64_t Imm = Op.getConstantOperandVal(2);
25841       SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
25842                                               Op.getValueType());
25843       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25844                          Op.getOperand(1), Control);
25845     }
25846     // ADC/SBB
25847     case ADX: {
25848       SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
25849       SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
25850 
25851       SDValue Res;
25852       // If the carry in is zero, then we should just use ADD/SUB instead of
25853       // ADC/SBB.
25854       if (isNullConstant(Op.getOperand(1))) {
25855         Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
25856                           Op.getOperand(3));
25857       } else {
25858         SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
25859                                     DAG.getConstant(-1, dl, MVT::i8));
25860         Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
25861                           Op.getOperand(3), GenCF.getValue(1));
25862       }
25863       SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
25864       SDValue Results[] = { SetCC, Res };
25865       return DAG.getMergeValues(Results, dl);
25866     }
25867     case CVTPD2PS_MASK:
25868     case CVTPD2DQ_MASK:
25869     case CVTQQ2PS_MASK:
25870     case TRUNCATE_TO_REG: {
25871       SDValue Src = Op.getOperand(1);
25872       SDValue PassThru = Op.getOperand(2);
25873       SDValue Mask = Op.getOperand(3);
25874 
25875       if (isAllOnesConstant(Mask))
25876         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25877 
25878       MVT SrcVT = Src.getSimpleValueType();
25879       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25880       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25881       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
25882                          {Src, PassThru, Mask});
25883     }
25884     case CVTPS2PH_MASK: {
25885       SDValue Src = Op.getOperand(1);
25886       SDValue Rnd = Op.getOperand(2);
25887       SDValue PassThru = Op.getOperand(3);
25888       SDValue Mask = Op.getOperand(4);
25889 
25890       unsigned RC = 0;
25891       unsigned Opc = IntrData->Opc0;
25892       bool SAE = Src.getValueType().is512BitVector() &&
25893                  (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
25894       if (SAE) {
25895         Opc = X86ISD::CVTPS2PH_SAE;
25896         Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
25897       }
25898 
25899       if (isAllOnesConstant(Mask))
25900         return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);
25901 
25902       if (SAE)
25903         Opc = X86ISD::MCVTPS2PH_SAE;
25904       else
25905         Opc = IntrData->Opc1;
25906       MVT SrcVT = Src.getSimpleValueType();
25907       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25908       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25909       return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
25910     }
25911     case CVTNEPS2BF16_MASK: {
25912       SDValue Src = Op.getOperand(1);
25913       SDValue PassThru = Op.getOperand(2);
25914       SDValue Mask = Op.getOperand(3);
25915 
25916       if (ISD::isBuildVectorAllOnes(Mask.getNode()))
25917         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25918 
25919       // Break false dependency.
25920       if (PassThru.isUndef())
25921         PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
25922 
25923       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
25924                          Mask);
25925     }
25926     default:
25927       break;
25928     }
25929   }
25930 
25931   switch (IntNo) {
25932   default: return SDValue();    // Don't custom lower most intrinsics.
25933 
25934   // ptest and testp intrinsics. The intrinsics these come from are designed to
25935   // return an integer value, not just an instruction, so lower them to the
25936   // ptest or testp pattern and a setcc for the result.
25937   case Intrinsic::x86_avx512_ktestc_b:
25938   case Intrinsic::x86_avx512_ktestc_w:
25939   case Intrinsic::x86_avx512_ktestc_d:
25940   case Intrinsic::x86_avx512_ktestc_q:
25941   case Intrinsic::x86_avx512_ktestz_b:
25942   case Intrinsic::x86_avx512_ktestz_w:
25943   case Intrinsic::x86_avx512_ktestz_d:
25944   case Intrinsic::x86_avx512_ktestz_q:
25945   case Intrinsic::x86_sse41_ptestz:
25946   case Intrinsic::x86_sse41_ptestc:
25947   case Intrinsic::x86_sse41_ptestnzc:
25948   case Intrinsic::x86_avx_ptestz_256:
25949   case Intrinsic::x86_avx_ptestc_256:
25950   case Intrinsic::x86_avx_ptestnzc_256:
25951   case Intrinsic::x86_avx_vtestz_ps:
25952   case Intrinsic::x86_avx_vtestc_ps:
25953   case Intrinsic::x86_avx_vtestnzc_ps:
25954   case Intrinsic::x86_avx_vtestz_pd:
25955   case Intrinsic::x86_avx_vtestc_pd:
25956   case Intrinsic::x86_avx_vtestnzc_pd:
25957   case Intrinsic::x86_avx_vtestz_ps_256:
25958   case Intrinsic::x86_avx_vtestc_ps_256:
25959   case Intrinsic::x86_avx_vtestnzc_ps_256:
25960   case Intrinsic::x86_avx_vtestz_pd_256:
25961   case Intrinsic::x86_avx_vtestc_pd_256:
25962   case Intrinsic::x86_avx_vtestnzc_pd_256: {
25963     unsigned TestOpc = X86ISD::PTEST;
25964     X86::CondCode X86CC;
25965     switch (IntNo) {
25966     default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
25967     case Intrinsic::x86_avx512_ktestc_b:
25968     case Intrinsic::x86_avx512_ktestc_w:
25969     case Intrinsic::x86_avx512_ktestc_d:
25970     case Intrinsic::x86_avx512_ktestc_q:
25971       // CF = 1
25972       TestOpc = X86ISD::KTEST;
25973       X86CC = X86::COND_B;
25974       break;
25975     case Intrinsic::x86_avx512_ktestz_b:
25976     case Intrinsic::x86_avx512_ktestz_w:
25977     case Intrinsic::x86_avx512_ktestz_d:
25978     case Intrinsic::x86_avx512_ktestz_q:
25979       TestOpc = X86ISD::KTEST;
25980       X86CC = X86::COND_E;
25981       break;
25982     case Intrinsic::x86_avx_vtestz_ps:
25983     case Intrinsic::x86_avx_vtestz_pd:
25984     case Intrinsic::x86_avx_vtestz_ps_256:
25985     case Intrinsic::x86_avx_vtestz_pd_256:
25986       TestOpc = X86ISD::TESTP;
25987       [[fallthrough]];
25988     case Intrinsic::x86_sse41_ptestz:
25989     case Intrinsic::x86_avx_ptestz_256:
25990       // ZF = 1
25991       X86CC = X86::COND_E;
25992       break;
25993     case Intrinsic::x86_avx_vtestc_ps:
25994     case Intrinsic::x86_avx_vtestc_pd:
25995     case Intrinsic::x86_avx_vtestc_ps_256:
25996     case Intrinsic::x86_avx_vtestc_pd_256:
25997       TestOpc = X86ISD::TESTP;
25998       [[fallthrough]];
25999     case Intrinsic::x86_sse41_ptestc:
26000     case Intrinsic::x86_avx_ptestc_256:
26001       // CF = 1
26002       X86CC = X86::COND_B;
26003       break;
26004     case Intrinsic::x86_avx_vtestnzc_ps:
26005     case Intrinsic::x86_avx_vtestnzc_pd:
26006     case Intrinsic::x86_avx_vtestnzc_ps_256:
26007     case Intrinsic::x86_avx_vtestnzc_pd_256:
26008       TestOpc = X86ISD::TESTP;
26009       [[fallthrough]];
26010     case Intrinsic::x86_sse41_ptestnzc:
26011     case Intrinsic::x86_avx_ptestnzc_256:
26012       // ZF and CF = 0
26013       X86CC = X86::COND_A;
26014       break;
26015     }
26016 
26017     SDValue LHS = Op.getOperand(1);
26018     SDValue RHS = Op.getOperand(2);
26019     SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
26020     SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
26021     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26022   }
26023 
26024   case Intrinsic::x86_sse42_pcmpistria128:
26025   case Intrinsic::x86_sse42_pcmpestria128:
26026   case Intrinsic::x86_sse42_pcmpistric128:
26027   case Intrinsic::x86_sse42_pcmpestric128:
26028   case Intrinsic::x86_sse42_pcmpistrio128:
26029   case Intrinsic::x86_sse42_pcmpestrio128:
26030   case Intrinsic::x86_sse42_pcmpistris128:
26031   case Intrinsic::x86_sse42_pcmpestris128:
26032   case Intrinsic::x86_sse42_pcmpistriz128:
26033   case Intrinsic::x86_sse42_pcmpestriz128: {
26034     unsigned Opcode;
26035     X86::CondCode X86CC;
26036     switch (IntNo) {
26037     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
26038     case Intrinsic::x86_sse42_pcmpistria128:
26039       Opcode = X86ISD::PCMPISTR;
26040       X86CC = X86::COND_A;
26041       break;
26042     case Intrinsic::x86_sse42_pcmpestria128:
26043       Opcode = X86ISD::PCMPESTR;
26044       X86CC = X86::COND_A;
26045       break;
26046     case Intrinsic::x86_sse42_pcmpistric128:
26047       Opcode = X86ISD::PCMPISTR;
26048       X86CC = X86::COND_B;
26049       break;
26050     case Intrinsic::x86_sse42_pcmpestric128:
26051       Opcode = X86ISD::PCMPESTR;
26052       X86CC = X86::COND_B;
26053       break;
26054     case Intrinsic::x86_sse42_pcmpistrio128:
26055       Opcode = X86ISD::PCMPISTR;
26056       X86CC = X86::COND_O;
26057       break;
26058     case Intrinsic::x86_sse42_pcmpestrio128:
26059       Opcode = X86ISD::PCMPESTR;
26060       X86CC = X86::COND_O;
26061       break;
26062     case Intrinsic::x86_sse42_pcmpistris128:
26063       Opcode = X86ISD::PCMPISTR;
26064       X86CC = X86::COND_S;
26065       break;
26066     case Intrinsic::x86_sse42_pcmpestris128:
26067       Opcode = X86ISD::PCMPESTR;
26068       X86CC = X86::COND_S;
26069       break;
26070     case Intrinsic::x86_sse42_pcmpistriz128:
26071       Opcode = X86ISD::PCMPISTR;
26072       X86CC = X86::COND_E;
26073       break;
26074     case Intrinsic::x86_sse42_pcmpestriz128:
26075       Opcode = X86ISD::PCMPESTR;
26076       X86CC = X86::COND_E;
26077       break;
26078     }
26079     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26080     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26081     SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
26082     SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
26083     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26084   }
26085 
26086   case Intrinsic::x86_sse42_pcmpistri128:
26087   case Intrinsic::x86_sse42_pcmpestri128: {
26088     unsigned Opcode;
26089     if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
26090       Opcode = X86ISD::PCMPISTR;
26091     else
26092       Opcode = X86ISD::PCMPESTR;
26093 
26094     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26095     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26096     return DAG.getNode(Opcode, dl, VTs, NewOps);
26097   }
26098 
26099   case Intrinsic::x86_sse42_pcmpistrm128:
26100   case Intrinsic::x86_sse42_pcmpestrm128: {
26101     unsigned Opcode;
26102     if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
26103       Opcode = X86ISD::PCMPISTR;
26104     else
26105       Opcode = X86ISD::PCMPESTR;
26106 
26107     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26108     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26109     return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
26110   }
26111 
26112   case Intrinsic::eh_sjlj_lsda: {
26113     MachineFunction &MF = DAG.getMachineFunction();
26114     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26115     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26116     auto &Context = MF.getMMI().getContext();
26117     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
26118                                             Twine(MF.getFunctionNumber()));
26119     return DAG.getNode(getGlobalWrapperKind(nullptr, /*OpFlags=*/0), dl, VT,
26120                        DAG.getMCSymbol(S, PtrVT));
26121   }
26122 
26123   case Intrinsic::x86_seh_lsda: {
26124     // Compute the symbol for the LSDA. We know it'll get emitted later.
26125     MachineFunction &MF = DAG.getMachineFunction();
26126     SDValue Op1 = Op.getOperand(1);
26127     auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
26128     MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
26129         GlobalValue::dropLLVMManglingEscape(Fn->getName()));
26130 
26131     // Generate a simple absolute symbol reference. This intrinsic is only
26132     // supported on 32-bit Windows, which isn't PIC.
26133     SDValue Result = DAG.getMCSymbol(LSDASym, VT);
26134     return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
26135   }
26136 
26137   case Intrinsic::eh_recoverfp: {
26138     SDValue FnOp = Op.getOperand(1);
26139     SDValue IncomingFPOp = Op.getOperand(2);
26140     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
26141     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
26142     if (!Fn)
26143       report_fatal_error(
26144           "llvm.eh.recoverfp must take a function as the first argument");
26145     return recoverFramePointer(DAG, Fn, IncomingFPOp);
26146   }
26147 
26148   case Intrinsic::localaddress: {
26149     // Returns one of the stack, base, or frame pointer registers, depending on
26150     // which is used to reference local variables.
26151     MachineFunction &MF = DAG.getMachineFunction();
26152     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26153     unsigned Reg;
26154     if (RegInfo->hasBasePointer(MF))
26155       Reg = RegInfo->getBaseRegister();
26156     else { // Handles the SP or FP case.
26157       bool CantUseFP = RegInfo->hasStackRealignment(MF);
26158       if (CantUseFP)
26159         Reg = RegInfo->getPtrSizedStackRegister(MF);
26160       else
26161         Reg = RegInfo->getPtrSizedFrameRegister(MF);
26162     }
26163     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
26164   }
26165   case Intrinsic::x86_avx512_vp2intersect_q_512:
26166   case Intrinsic::x86_avx512_vp2intersect_q_256:
26167   case Intrinsic::x86_avx512_vp2intersect_q_128:
26168   case Intrinsic::x86_avx512_vp2intersect_d_512:
26169   case Intrinsic::x86_avx512_vp2intersect_d_256:
26170   case Intrinsic::x86_avx512_vp2intersect_d_128: {
26171     MVT MaskVT = Op.getSimpleValueType();
26172 
26173     SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
26174     SDLoc DL(Op);
26175 
26176     SDValue Operation =
26177         DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
26178                     Op->getOperand(1), Op->getOperand(2));
26179 
26180     SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
26181                                                  MaskVT, Operation);
26182     SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
26183                                                  MaskVT, Operation);
26184     return DAG.getMergeValues({Result0, Result1}, DL);
26185   }
26186   case Intrinsic::x86_mmx_pslli_w:
26187   case Intrinsic::x86_mmx_pslli_d:
26188   case Intrinsic::x86_mmx_pslli_q:
26189   case Intrinsic::x86_mmx_psrli_w:
26190   case Intrinsic::x86_mmx_psrli_d:
26191   case Intrinsic::x86_mmx_psrli_q:
26192   case Intrinsic::x86_mmx_psrai_w:
26193   case Intrinsic::x86_mmx_psrai_d: {
26194     SDLoc DL(Op);
26195     SDValue ShAmt = Op.getOperand(2);
26196     // If the argument is a constant, convert it to a target constant.
26197     if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
26198       // Clamp out of bounds shift amounts since they will otherwise be masked
26199       // to 8 bits, which may make it no longer out of bounds.
26200       unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
26201       if (ShiftAmount == 0)
26202         return Op.getOperand(1);
26203 
26204       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26205                          Op.getOperand(0), Op.getOperand(1),
26206                          DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
26207     }
26208 
26209     unsigned NewIntrinsic;
26210     switch (IntNo) {
26211     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
26212     case Intrinsic::x86_mmx_pslli_w:
26213       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
26214       break;
26215     case Intrinsic::x86_mmx_pslli_d:
26216       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
26217       break;
26218     case Intrinsic::x86_mmx_pslli_q:
26219       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
26220       break;
26221     case Intrinsic::x86_mmx_psrli_w:
26222       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
26223       break;
26224     case Intrinsic::x86_mmx_psrli_d:
26225       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
26226       break;
26227     case Intrinsic::x86_mmx_psrli_q:
26228       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
26229       break;
26230     case Intrinsic::x86_mmx_psrai_w:
26231       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
26232       break;
26233     case Intrinsic::x86_mmx_psrai_d:
26234       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
26235       break;
26236     }
26237 
26238     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
26239     // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
26240     // MMX register.
26241     ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
26242     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26243                        DAG.getTargetConstant(NewIntrinsic, DL,
26244                                              getPointerTy(DAG.getDataLayout())),
26245                        Op.getOperand(1), ShAmt);
26246   }
26247   case Intrinsic::thread_pointer: {
26248     if (Subtarget.isTargetELF()) {
26249       SDLoc dl(Op);
26250       EVT PtrVT = getPointerTy(DAG.getDataLayout());
26251       // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
26252       Value *Ptr = Constant::getNullValue(PointerType::get(
26253           *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
26254       return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
26255                          DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
26256     }
26257     report_fatal_error(
26258         "Target OS doesn't support __builtin_thread_pointer() yet.");
26259   }
26260   }
26261 }
26262 
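/// Lower an AVX2 gather intrinsic to an X86ISD::MGATHER memory intrinsic
/// node. Returns an empty SDValue if the scale operand is not a constant.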
26263 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26264                                  SDValue Src, SDValue Mask, SDValue Base,
26265                                  SDValue Index, SDValue ScaleOp, SDValue Chain,
26266                                  const X86Subtarget &Subtarget) {
26267   SDLoc dl(Op);
26268   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26269   // Scale must be constant.
26270   if (!C)
26271     return SDValue();
26272   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26273   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26274                                         TLI.getPointerTy(DAG.getDataLayout()));
26275   EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
26276   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26277   // If source is undef or we know it won't be used, use a zero vector
26278   // to break register dependency.
26279   // TODO: use undef instead and let BreakFalseDeps deal with it?
26280   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26281     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26282 
26283   // Cast mask to an integer type.
26284   Mask = DAG.getBitcast(MaskVT, Mask);
26285 
26286   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26287 
26288   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26289   SDValue Res =
26290       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26291                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26292   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26293 }
26294 
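/// Lower a gather intrinsic to an X86ISD::MGATHER memory intrinsic node,
/// converting a scalar mask operand to a vXi1 mask when necessary. Returns an
/// empty SDValue if the scale operand is not a constant.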
26295 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
26296                              SDValue Src, SDValue Mask, SDValue Base,
26297                              SDValue Index, SDValue ScaleOp, SDValue Chain,
26298                              const X86Subtarget &Subtarget) {
26299   MVT VT = Op.getSimpleValueType();
26300   SDLoc dl(Op);
26301   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26302   // Scale must be constant.
26303   if (!C)
26304     return SDValue();
26305   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26306   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26307                                         TLI.getPointerTy(DAG.getDataLayout()));
26308   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26309                               VT.getVectorNumElements());
26310   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26311 
26312   // We support two versions of the gather intrinsics. One with scalar mask and
26313   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
26314   if (Mask.getValueType() != MaskVT)
26315     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26316 
26317   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26318   // If source is undef or we know it won't be used, use a zero vector
26319   // to break register dependency.
26320   // TODO: use undef instead and let BreakFalseDeps deal with it?
26321   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26322     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26323 
26324   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26325 
26326   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26327   SDValue Res =
26328       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26329                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26330   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26331 }
26332 
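/// Lower a scatter intrinsic to an X86ISD::MSCATTER memory intrinsic node,
/// converting a scalar mask operand to a vXi1 mask when necessary. Returns an
/// empty SDValue if the scale operand is not a constant.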
26333 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26334                                SDValue Src, SDValue Mask, SDValue Base,
26335                                SDValue Index, SDValue ScaleOp, SDValue Chain,
26336                                const X86Subtarget &Subtarget) {
26337   SDLoc dl(Op);
26338   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26339   // Scale must be constant.
26340   if (!C)
26341     return SDValue();
26342   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26343   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26344                                         TLI.getPointerTy(DAG.getDataLayout()));
26345   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26346                               Src.getSimpleValueType().getVectorNumElements());
26347   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26348 
26349   // We support two versions of the scatter intrinsics. One with scalar mask and
26350   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
26351   if (Mask.getValueType() != MaskVT)
26352     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26353 
26354   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26355 
26356   SDVTList VTs = DAG.getVTList(MVT::Other);
26357   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
26358   SDValue Res =
26359       DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
26360                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26361   return Res;
26362 }
26363 
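/// Lower a gather/scatter prefetch intrinsic to a machine node with the given
/// opcode. Returns an empty SDValue if the scale operand is not a constant.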
26364 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26365                                SDValue Mask, SDValue Base, SDValue Index,
26366                                SDValue ScaleOp, SDValue Chain,
26367                                const X86Subtarget &Subtarget) {
26368   SDLoc dl(Op);
26369   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26370   // Scale must be constant.
26371   if (!C)
26372     return SDValue();
26373   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26374   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26375                                         TLI.getPointerTy(DAG.getDataLayout()));
26376   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
26377   SDValue Segment = DAG.getRegister(0, MVT::i32);
26378   MVT MaskVT =
26379     MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
26380   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26381   SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
26382   SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
26383   return SDValue(Res, 0);
26384 }
26385 
26386 /// Handles the lowering of builtin intrinsics with a chain that return their
26387 /// value in registers EDX:EAX.
26388 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
26389 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
26390 /// TargetOpcode.
26391 /// Returns a Glue value which can be used to add extra copy-from-reg if the
26392 /// expanded intrinsic implicitly defines extra registers (i.e. not just
26393 /// EDX:EAX).
26394 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
26395                                         SelectionDAG &DAG,
26396                                         unsigned TargetOpcode,
26397                                         unsigned SrcReg,
26398                                         const X86Subtarget &Subtarget,
26399                                         SmallVectorImpl<SDValue> &Results) {
26400   SDValue Chain = N->getOperand(0);
26401   SDValue Glue;
26402 
26403   if (SrcReg) {
26404     assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
26405     Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
26406     Glue = Chain.getValue(1);
26407   }
26408 
26409   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26410   SDValue N1Ops[] = {Chain, Glue};
26411   SDNode *N1 = DAG.getMachineNode(
26412       TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
26413   Chain = SDValue(N1, 0);
26414 
26415   // The result of the target opcode is returned in registers EDX:EAX.
26416   SDValue LO, HI;
26417   if (Subtarget.is64Bit()) {
26418     LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
26419     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
26420                             LO.getValue(2));
26421   } else {
26422     LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
26423     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
26424                             LO.getValue(2));
26425   }
26426   Chain = HI.getValue(1);
26427   Glue = HI.getValue(2);
26428 
26429   if (Subtarget.is64Bit()) {
26430     // Merge the two 32-bit values into a 64-bit one.
26431     SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
26432                               DAG.getConstant(32, DL, MVT::i8));
26433     Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
26434     Results.push_back(Chain);
26435     return Glue;
26436   }
26437 
26438   // Use a buildpair to merge the two 32-bit values into a 64-bit one.
26439   SDValue Ops[] = { LO, HI };
26440   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
26441   Results.push_back(Pair);
26442   Results.push_back(Chain);
26443   return Glue;
26444 }
26445 
26446 /// Handles the lowering of builtin intrinsics that read the time stamp counter
26447 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
26448 /// READCYCLECOUNTER nodes.
26449 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
26450                                     SelectionDAG &DAG,
26451                                     const X86Subtarget &Subtarget,
26452                                     SmallVectorImpl<SDValue> &Results) {
26453   // The processor's time-stamp counter (a 64-bit MSR) is stored into the
26454   // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
26455   // and the EAX register is loaded with the low-order 32 bits.
26456   SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
26457                                              /* NoRegister */0, Subtarget,
26458                                              Results);
26459   if (Opcode != X86::RDTSCP)
26460     return;
26461 
26462   SDValue Chain = Results[1];
26463   // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
26464   // the ECX register. Add 'ecx' explicitly to the chain.
26465   SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
26466   Results[1] = ecx;
26467   Results.push_back(ecx.getValue(1));
26468 }
26469 
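/// Custom lower READCYCLECOUNTER nodes by expanding them to an RDTSC read of
/// EDX:EAX and merging the results.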
26470 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
26471                                      SelectionDAG &DAG) {
26472   SmallVector<SDValue, 3> Results;
26473   SDLoc DL(Op);
26474   getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
26475                           Results);
26476   return DAG.getMergeValues(Results, DL);
26477 }
26478 
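
/// Record the frame index of the static alloca passed to
/// llvm.x86.seh.ehregnode in the function's WinEHFuncInfo and return the
/// chain unchanged.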
26479 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
26480   MachineFunction &MF = DAG.getMachineFunction();
26481   SDValue Chain = Op.getOperand(0);
26482   SDValue RegNode = Op.getOperand(2);
26483   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26484   if (!EHInfo)
26485     report_fatal_error("EH registrations only live in functions using WinEH");
26486 
26487   // Cast the operand to an alloca, and remember the frame index.
26488   auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
26489   if (!FINode)
26490     report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
26491   EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
26492 
26493   // Return the chain operand without making any DAG nodes.
26494   return Chain;
26495 }
26496 
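/// Record the frame index of the static alloca passed to llvm.x86.seh.ehguard
/// in the function's WinEHFuncInfo and return the chain unchanged.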
26497 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
26498   MachineFunction &MF = DAG.getMachineFunction();
26499   SDValue Chain = Op.getOperand(0);
26500   SDValue EHGuard = Op.getOperand(2);
26501   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26502   if (!EHInfo)
26503     report_fatal_error("EHGuard only live in functions using WinEH");
26504 
26505   // Cast the operand to an alloca, and remember the frame index.
26506   auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
26507   if (!FINode)
26508     report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
26509   EHInfo->EHGuardFrameIndex = FINode->getIndex();
26510 
26511   // Return the chain operand without making any DAG nodes.
26512   return Chain;
26513 }
26514 
26515 /// Emit Truncating Store with signed or unsigned saturation.
26516 static SDValue
26517 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val,
26518                 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
26519                 SelectionDAG &DAG) {
26520   SDVTList VTs = DAG.getVTList(MVT::Other);
26521   SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
26522   SDValue Ops[] = { Chain, Val, Ptr, Undef };
26523   unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
26524   return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26525 }
26526 
26527 /// Emit Masked Truncating Store with signed or unsigned saturation.
26528 static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain,
26529                                      const SDLoc &DL,
26530                       SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
26531                       MachineMemOperand *MMO, SelectionDAG &DAG) {
26532   SDVTList VTs = DAG.getVTList(MVT::Other);
26533   SDValue Ops[] = { Chain, Val, Ptr, Mask };
26534   unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
26535   return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26536 }
26537 
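/// Custom lower intrinsics with a chain. Intrinsics without an entry in the
/// intrinsic table queried by getIntrinsicWithChain are handled individually
/// in the switch below.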
26538 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
26539                                       SelectionDAG &DAG) {
26540   unsigned IntNo = Op.getConstantOperandVal(1);
26541   const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
26542   if (!IntrData) {
26543     switch (IntNo) {
26544 
26545     case Intrinsic::swift_async_context_addr: {
26546       SDLoc dl(Op);
26547       auto &MF = DAG.getMachineFunction();
26548       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
26549       if (Subtarget.is64Bit()) {
26550         MF.getFrameInfo().setFrameAddressIsTaken(true);
26551         X86FI->setHasSwiftAsyncContext(true);
26552         SDValue Chain = Op->getOperand(0);
26553         SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
26554         SDValue Result =
26555             SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
26556                                        DAG.getTargetConstant(8, dl, MVT::i32)),
26557                     0);
26558         // Return { result, chain }.
26559         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26560                            CopyRBP.getValue(1));
26561       } else {
26562         // 32-bit, so there is no special extended frame; create or reuse an
26563         // existing stack slot.
26564         if (!X86FI->getSwiftAsyncContextFrameIdx())
26565           X86FI->setSwiftAsyncContextFrameIdx(
26566               MF.getFrameInfo().CreateStackObject(4, Align(4), false));
26567         SDValue Result =
26568             DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
26569         // Return { result, chain }.
26570         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26571                            Op->getOperand(0));
26572       }
26573     }
26574 
26575     case llvm::Intrinsic::x86_seh_ehregnode:
26576       return MarkEHRegistrationNode(Op, DAG);
26577     case llvm::Intrinsic::x86_seh_ehguard:
26578       return MarkEHGuard(Op, DAG);
26579     case llvm::Intrinsic::x86_rdpkru: {
26580       SDLoc dl(Op);
26581       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26582       // Create a RDPKRU node and pass 0 to the ECX parameter.
26583       // Create an RDPKRU node and pass 0 to the ECX parameter.
26584                          DAG.getConstant(0, dl, MVT::i32));
26585     }
26586     case llvm::Intrinsic::x86_wrpkru: {
26587       SDLoc dl(Op);
26588       // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
26589       // to the EDX and ECX parameters.
26590       return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
26591                          Op.getOperand(0), Op.getOperand(2),
26592                          DAG.getConstant(0, dl, MVT::i32),
26593                          DAG.getConstant(0, dl, MVT::i32));
26594     }
26595     case llvm::Intrinsic::asan_check_memaccess: {
26596       // Mark this as adjustsStack because it will be lowered to a call.
26597       DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
26598       // Don't do anything here; we will expand these intrinsics out later.
26599       return Op;
26600     }
26601     case llvm::Intrinsic::x86_flags_read_u32:
26602     case llvm::Intrinsic::x86_flags_read_u64:
26603     case llvm::Intrinsic::x86_flags_write_u32:
26604     case llvm::Intrinsic::x86_flags_write_u64: {
26605       // We need a frame pointer because this will get lowered to a PUSH/POP
26606       // sequence.
26607       MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
26608       MFI.setHasCopyImplyingStackAdjustment(true);
26609       // Don't do anything here; we will expand these intrinsics out later
26610       // during FinalizeISel in EmitInstrWithCustomInserter.
26611       return Op;
26612     }
26613     case Intrinsic::x86_lwpins32:
26614     case Intrinsic::x86_lwpins64:
26615     case Intrinsic::x86_umwait:
26616     case Intrinsic::x86_tpause: {
26617       SDLoc dl(Op);
26618       SDValue Chain = Op->getOperand(0);
26619       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26620       unsigned Opcode;
26621 
26622       switch (IntNo) {
26623       default: llvm_unreachable("Impossible intrinsic");
26624       case Intrinsic::x86_umwait:
26625         Opcode = X86ISD::UMWAIT;
26626         break;
26627       case Intrinsic::x86_tpause:
26628         Opcode = X86ISD::TPAUSE;
26629         break;
26630       case Intrinsic::x86_lwpins32:
26631       case Intrinsic::x86_lwpins64:
26632         Opcode = X86ISD::LWPINS;
26633         break;
26634       }
26635 
26636       SDValue Operation =
26637           DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
26638                       Op->getOperand(3), Op->getOperand(4));
26639       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26640       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26641                          Operation.getValue(1));
26642     }
26643     case Intrinsic::x86_enqcmd:
26644     case Intrinsic::x86_enqcmds: {
26645       SDLoc dl(Op);
26646       SDValue Chain = Op.getOperand(0);
26647       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26648       unsigned Opcode;
26649       switch (IntNo) {
26650       default: llvm_unreachable("Impossible intrinsic!");
26651       case Intrinsic::x86_enqcmd:
26652         Opcode = X86ISD::ENQCMD;
26653         break;
26654       case Intrinsic::x86_enqcmds:
26655         Opcode = X86ISD::ENQCMDS;
26656         break;
26657       }
26658       SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
26659                                       Op.getOperand(3));
26660       SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
26661       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26662                          Operation.getValue(1));
26663     }
26664     case Intrinsic::x86_aesenc128kl:
26665     case Intrinsic::x86_aesdec128kl:
26666     case Intrinsic::x86_aesenc256kl:
26667     case Intrinsic::x86_aesdec256kl: {
26668       SDLoc DL(Op);
26669       SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
26670       SDValue Chain = Op.getOperand(0);
26671       unsigned Opcode;
26672 
26673       switch (IntNo) {
26674       default: llvm_unreachable("Impossible intrinsic");
26675       case Intrinsic::x86_aesenc128kl:
26676         Opcode = X86ISD::AESENC128KL;
26677         break;
26678       case Intrinsic::x86_aesdec128kl:
26679         Opcode = X86ISD::AESDEC128KL;
26680         break;
26681       case Intrinsic::x86_aesenc256kl:
26682         Opcode = X86ISD::AESENC256KL;
26683         break;
26684       case Intrinsic::x86_aesdec256kl:
26685         Opcode = X86ISD::AESDEC256KL;
26686         break;
26687       }
26688 
26689       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26690       MachineMemOperand *MMO = MemIntr->getMemOperand();
26691       EVT MemVT = MemIntr->getMemoryVT();
26692       SDValue Operation = DAG.getMemIntrinsicNode(
26693           Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
26694           MMO);
26695       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
26696 
26697       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26698                          {ZF, Operation.getValue(0), Operation.getValue(2)});
26699     }
26700     case Intrinsic::x86_aesencwide128kl:
26701     case Intrinsic::x86_aesdecwide128kl:
26702     case Intrinsic::x86_aesencwide256kl:
26703     case Intrinsic::x86_aesdecwide256kl: {
26704       SDLoc DL(Op);
26705       SDVTList VTs = DAG.getVTList(
26706           {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
26707            MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
26708       SDValue Chain = Op.getOperand(0);
26709       unsigned Opcode;
26710 
26711       switch (IntNo) {
26712       default: llvm_unreachable("Impossible intrinsic");
26713       case Intrinsic::x86_aesencwide128kl:
26714         Opcode = X86ISD::AESENCWIDE128KL;
26715         break;
26716       case Intrinsic::x86_aesdecwide128kl:
26717         Opcode = X86ISD::AESDECWIDE128KL;
26718         break;
26719       case Intrinsic::x86_aesencwide256kl:
26720         Opcode = X86ISD::AESENCWIDE256KL;
26721         break;
26722       case Intrinsic::x86_aesdecwide256kl:
26723         Opcode = X86ISD::AESDECWIDE256KL;
26724         break;
26725       }
26726 
26727       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26728       MachineMemOperand *MMO = MemIntr->getMemOperand();
26729       EVT MemVT = MemIntr->getMemoryVT();
26730       SDValue Operation = DAG.getMemIntrinsicNode(
26731           Opcode, DL, VTs,
26732           {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
26733            Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
26734            Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
26735           MemVT, MMO);
26736       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
26737 
26738       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26739                          {ZF, Operation.getValue(1), Operation.getValue(2),
26740                           Operation.getValue(3), Operation.getValue(4),
26741                           Operation.getValue(5), Operation.getValue(6),
26742                           Operation.getValue(7), Operation.getValue(8),
26743                           Operation.getValue(9)});
26744     }
26745     case Intrinsic::x86_testui: {
26746       SDLoc dl(Op);
26747       SDValue Chain = Op.getOperand(0);
26748       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26749       SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
26750       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26751       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26752                          Operation.getValue(1));
26753     }
26754     case Intrinsic::x86_atomic_bts_rm:
26755     case Intrinsic::x86_atomic_btc_rm:
26756     case Intrinsic::x86_atomic_btr_rm: {
26757       SDLoc DL(Op);
26758       MVT VT = Op.getSimpleValueType();
26759       SDValue Chain = Op.getOperand(0);
26760       SDValue Op1 = Op.getOperand(2);
26761       SDValue Op2 = Op.getOperand(3);
26762       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm   ? X86ISD::LBTS_RM
26763                      : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
26764                                                              : X86ISD::LBTR_RM;
26765       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26766       SDValue Res =
26767           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26768                                   {Chain, Op1, Op2}, VT, MMO);
26769       Chain = Res.getValue(1);
26770       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
26771       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26772     }
26773     case Intrinsic::x86_atomic_bts:
26774     case Intrinsic::x86_atomic_btc:
26775     case Intrinsic::x86_atomic_btr: {
26776       SDLoc DL(Op);
26777       MVT VT = Op.getSimpleValueType();
26778       SDValue Chain = Op.getOperand(0);
26779       SDValue Op1 = Op.getOperand(2);
26780       SDValue Op2 = Op.getOperand(3);
26781       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts   ? X86ISD::LBTS
26782                      : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
26783                                                           : X86ISD::LBTR;
26784       SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
26785       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26786       SDValue Res =
26787           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26788                                   {Chain, Op1, Op2, Size}, VT, MMO);
26789       Chain = Res.getValue(1);
26790       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
26791       unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
26792       if (Imm)
26793         Res = DAG.getNode(ISD::SHL, DL, VT, Res,
26794                           DAG.getShiftAmountConstant(Imm, VT, DL));
26795       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26796     }
26797     case Intrinsic::x86_cmpccxadd32:
26798     case Intrinsic::x86_cmpccxadd64: {
26799       SDLoc DL(Op);
26800       SDValue Chain = Op.getOperand(0);
26801       SDValue Addr = Op.getOperand(2);
26802       SDValue Src1 = Op.getOperand(3);
26803       SDValue Src2 = Op.getOperand(4);
26804       SDValue CC = Op.getOperand(5);
26805       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26806       SDValue Operation = DAG.getMemIntrinsicNode(
26807           X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
26808           MVT::i32, MMO);
26809       return Operation;
26810     }
26811     case Intrinsic::x86_aadd32:
26812     case Intrinsic::x86_aadd64:
26813     case Intrinsic::x86_aand32:
26814     case Intrinsic::x86_aand64:
26815     case Intrinsic::x86_aor32:
26816     case Intrinsic::x86_aor64:
26817     case Intrinsic::x86_axor32:
26818     case Intrinsic::x86_axor64: {
26819       SDLoc DL(Op);
26820       SDValue Chain = Op.getOperand(0);
26821       SDValue Op1 = Op.getOperand(2);
26822       SDValue Op2 = Op.getOperand(3);
26823       MVT VT = Op2.getSimpleValueType();
26824       unsigned Opc = 0;
26825       switch (IntNo) {
26826       default:
26827         llvm_unreachable("Unknown Intrinsic");
26828       case Intrinsic::x86_aadd32:
26829       case Intrinsic::x86_aadd64:
26830         Opc = X86ISD::AADD;
26831         break;
26832       case Intrinsic::x86_aand32:
26833       case Intrinsic::x86_aand64:
26834         Opc = X86ISD::AAND;
26835         break;
26836       case Intrinsic::x86_aor32:
26837       case Intrinsic::x86_aor64:
26838         Opc = X86ISD::AOR;
26839         break;
26840       case Intrinsic::x86_axor32:
26841       case Intrinsic::x86_axor64:
26842         Opc = X86ISD::AXOR;
26843         break;
26844       }
26845       MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
26846       return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
26847                                      {Chain, Op1, Op2}, VT, MMO);
26848     }
26849     case Intrinsic::x86_atomic_add_cc:
26850     case Intrinsic::x86_atomic_sub_cc:
26851     case Intrinsic::x86_atomic_or_cc:
26852     case Intrinsic::x86_atomic_and_cc:
26853     case Intrinsic::x86_atomic_xor_cc: {
26854       SDLoc DL(Op);
26855       SDValue Chain = Op.getOperand(0);
26856       SDValue Op1 = Op.getOperand(2);
26857       SDValue Op2 = Op.getOperand(3);
26858       X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
26859       MVT VT = Op2.getSimpleValueType();
26860       unsigned Opc = 0;
26861       switch (IntNo) {
26862       default:
26863         llvm_unreachable("Unknown Intrinsic");
26864       case Intrinsic::x86_atomic_add_cc:
26865         Opc = X86ISD::LADD;
26866         break;
26867       case Intrinsic::x86_atomic_sub_cc:
26868         Opc = X86ISD::LSUB;
26869         break;
26870       case Intrinsic::x86_atomic_or_cc:
26871         Opc = X86ISD::LOR;
26872         break;
26873       case Intrinsic::x86_atomic_and_cc:
26874         Opc = X86ISD::LAND;
26875         break;
26876       case Intrinsic::x86_atomic_xor_cc:
26877         Opc = X86ISD::LXOR;
26878         break;
26879       }
26880       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26881       SDValue LockArith =
26882           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26883                                   {Chain, Op1, Op2}, VT, MMO);
26884       Chain = LockArith.getValue(1);
26885       return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
26886     }
26887     }
26888     return SDValue();
26889   }
26890 
26891   SDLoc dl(Op);
26892   switch(IntrData->Type) {
26893   default: llvm_unreachable("Unknown Intrinsic Type");
26894   case RDSEED:
26895   case RDRAND: {
26896     // Emit the node with the right value type.
26897     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
26898     SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
26899 
26900     // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
26901     // Otherwise return the value from Rand, which is always 0, casted to i32.
26902     SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
26903                      DAG.getConstant(1, dl, Op->getValueType(1)),
26904                      DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
26905                      SDValue(Result.getNode(), 1)};
26906     SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
26907 
26908     // Return { result, isValid, chain }.
26909     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
26910                        SDValue(Result.getNode(), 2));
26911   }
26912   case GATHER_AVX2: {
26913     SDValue Chain = Op.getOperand(0);
26914     SDValue Src   = Op.getOperand(2);
26915     SDValue Base  = Op.getOperand(3);
26916     SDValue Index = Op.getOperand(4);
26917     SDValue Mask  = Op.getOperand(5);
26918     SDValue Scale = Op.getOperand(6);
26919     return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26920                              Scale, Chain, Subtarget);
26921   }
26922   case GATHER: {
26923     // gather(v1, mask, index, base, scale);
26924     SDValue Chain = Op.getOperand(0);
26925     SDValue Src   = Op.getOperand(2);
26926     SDValue Base  = Op.getOperand(3);
26927     SDValue Index = Op.getOperand(4);
26928     SDValue Mask  = Op.getOperand(5);
26929     SDValue Scale = Op.getOperand(6);
26930     return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
26931                          Chain, Subtarget);
26932   }
26933   case SCATTER: {
26934     // scatter(base, mask, index, v1, scale);
26935     SDValue Chain = Op.getOperand(0);
26936     SDValue Base  = Op.getOperand(2);
26937     SDValue Mask  = Op.getOperand(3);
26938     SDValue Index = Op.getOperand(4);
26939     SDValue Src   = Op.getOperand(5);
26940     SDValue Scale = Op.getOperand(6);
26941     return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26942                           Scale, Chain, Subtarget);
26943   }
26944   case PREFETCH: {
26945     const APInt &HintVal = Op.getConstantOperandAPInt(6);
26946     assert((HintVal == 2 || HintVal == 3) &&
26947            "Wrong prefetch hint in intrinsic: should be 2 or 3");
26948     unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
26949     SDValue Chain = Op.getOperand(0);
26950     SDValue Mask  = Op.getOperand(2);
26951     SDValue Index = Op.getOperand(3);
26952     SDValue Base  = Op.getOperand(4);
26953     SDValue Scale = Op.getOperand(5);
26954     return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
26955                            Subtarget);
26956   }
26957   // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
26958   case RDTSC: {
26959     SmallVector<SDValue, 2> Results;
26960     getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
26961                             Results);
26962     return DAG.getMergeValues(Results, dl);
26963   }
26964   // Read Performance Monitoring Counters.
26965   case RDPMC:
26966   // Read Processor Register.
26967   case RDPRU:
26968   // Get Extended Control Register.
26969   case XGETBV: {
26970     SmallVector<SDValue, 2> Results;
26971 
26972     // RDPMC uses ECX to select the index of the performance counter to read.
26973     // RDPRU uses ECX to select the processor register to read.
26974     // XGETBV uses ECX to select the index of the XCR register to return.
26975     // The result is stored into registers EDX:EAX.
26976     expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
26977                                 Subtarget, Results);
26978     return DAG.getMergeValues(Results, dl);
26979   }
26980   // XTEST intrinsics.
26981   case XTEST: {
26982     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
26983     SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
26984 
26985     SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
26986     SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
26987     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
26988                        Ret, SDValue(InTrans.getNode(), 1));
26989   }
26990   case TRUNCATE_TO_MEM_VI8:
26991   case TRUNCATE_TO_MEM_VI16:
26992   case TRUNCATE_TO_MEM_VI32: {
26993     SDValue Mask = Op.getOperand(4);
26994     SDValue DataToTruncate = Op.getOperand(3);
26995     SDValue Addr = Op.getOperand(2);
26996     SDValue Chain = Op.getOperand(0);
26997 
26998     MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
26999     assert(MemIntr && "Expected MemIntrinsicSDNode!");
27000 
27001     EVT MemVT  = MemIntr->getMemoryVT();
27002 
27003     uint16_t TruncationOp = IntrData->Opc0;
27004     switch (TruncationOp) {
27005     case X86ISD::VTRUNC: {
27006       if (isAllOnesConstant(Mask)) // return just a truncate store
27007         return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
27008                                  MemIntr->getMemOperand());
27009 
27010       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27011       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27012       SDValue Offset = DAG.getUNDEF(VMask.getValueType());
27013 
27014       return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
27015                                 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
27016                                 true /* truncating */);
27017     }
27018     case X86ISD::VTRUNCUS:
27019     case X86ISD::VTRUNCS: {
27020       bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
27021       if (isAllOnesConstant(Mask))
27022         return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
27023                                MemIntr->getMemOperand(), DAG);
27024 
27025       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27026       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27027 
27028       return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
27029                                    VMask, MemVT, MemIntr->getMemOperand(), DAG);
27030     }
27031     default:
27032       llvm_unreachable("Unsupported truncstore intrinsic");
27033     }
27034   }
27035   }
27036 }
27037 
27038 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
27039                                            SelectionDAG &DAG) const {
27040   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
27041   MFI.setReturnAddressIsTaken(true);
27042 
27043   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
27044     return SDValue();
27045 
27046   unsigned Depth = Op.getConstantOperandVal(0);
27047   SDLoc dl(Op);
27048   EVT PtrVT = getPointerTy(DAG.getDataLayout());
27049 
27050   if (Depth > 0) {
27051     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
27052     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27053     SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
27054     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
27055                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
27056                        MachinePointerInfo());
27057   }
27058 
27059   // Just load the return address.
27060   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
27061   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
27062                      MachinePointerInfo());
27063 }
27064 
27065 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
27066                                                  SelectionDAG &DAG) const {
27067   DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
27068   return getReturnAddressFrameIndex(DAG);
27069 }
27070 
27071 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
27072   MachineFunction &MF = DAG.getMachineFunction();
27073   MachineFrameInfo &MFI = MF.getFrameInfo();
27074   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
27075   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27076   EVT VT = Op.getValueType();
27077 
27078   MFI.setFrameAddressIsTaken(true);
27079 
27080   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
27081     // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
27082     // is not possible to crawl up the stack without looking at the unwind codes
27083     // simultaneously.
27084     int FrameAddrIndex = FuncInfo->getFAIndex();
27085     if (!FrameAddrIndex) {
27086       // Set up a frame object for the return address.
27087       unsigned SlotSize = RegInfo->getSlotSize();
27088       FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
27089           SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
27090       FuncInfo->setFAIndex(FrameAddrIndex);
27091     }
27092     return DAG.getFrameIndex(FrameAddrIndex, VT);
27093   }
27094 
27095   unsigned FrameReg =
27096       RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
27097   SDLoc dl(Op);  // FIXME probably not meaningful
27098   unsigned Depth = Op.getConstantOperandVal(0);
27099   assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
27100           (FrameReg == X86::EBP && VT == MVT::i32)) &&
27101          "Invalid Frame Register!");
27102   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
27103   while (Depth--)
27104     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
27105                             MachinePointerInfo());
27106   return FrameAddr;
27107 }
27108 
27109 // FIXME? Maybe this could be a TableGen attribute on some registers and
27110 // this table could be generated automatically from RegInfo.
27111 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
27112                                               const MachineFunction &MF) const {
27113   const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
27114 
27115   Register Reg = StringSwitch<unsigned>(RegName)
27116                      .Case("esp", X86::ESP)
27117                      .Case("rsp", X86::RSP)
27118                      .Case("ebp", X86::EBP)
27119                      .Case("rbp", X86::RBP)
27120                      .Case("r14", X86::R14)
27121                      .Case("r15", X86::R15)
27122                      .Default(0);
27123 
27124   if (Reg == X86::EBP || Reg == X86::RBP) {
27125     if (!TFI.hasFP(MF))
27126       report_fatal_error("register " + StringRef(RegName) +
27127                          " is allocatable: function has no frame pointer");
27128 #ifndef NDEBUG
27129     else {
27130       const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27131       Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
27132       assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
27133              "Invalid Frame Register!");
27134     }
27135 #endif
27136   }
27137 
27138   if (Reg)
27139     return Reg;
27140 
27141   report_fatal_error("Invalid register name global variable");
27142 }
27143 
27144 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
27145                                                      SelectionDAG &DAG) const {
27146   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27147   return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
27148 }
27149 
27150 Register X86TargetLowering::getExceptionPointerRegister(
27151     const Constant *PersonalityFn) const {
27152   if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
27153     return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27154 
27155   return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
27156 }
27157 
27158 Register X86TargetLowering::getExceptionSelectorRegister(
27159     const Constant *PersonalityFn) const {
27160   // Funclet personalities don't use selectors (the runtime does the selection).
27161   if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
27162     return X86::NoRegister;
27163   return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27164 }
27165 
27166 bool X86TargetLowering::needsFixedCatchObjects() const {
27167   return Subtarget.isTargetWin64();
27168 }
27169 
27170 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
27171   SDValue Chain     = Op.getOperand(0);
27172   SDValue Offset    = Op.getOperand(1);
27173   SDValue Handler   = Op.getOperand(2);
27174   SDLoc dl      (Op);
27175 
27176   EVT PtrVT = getPointerTy(DAG.getDataLayout());
27177   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27178   Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
27179   assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
27180           (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
27181          "Invalid Frame Register!");
27182   SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
27183   Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
27184 
27185   SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
27186                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
27187                                                        dl));
27188   StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
27189   Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
27190   Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
27191 
27192   return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
27193                      DAG.getRegister(StoreAddrReg, PtrVT));
27194 }
27195 
27196 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
27197                                                SelectionDAG &DAG) const {
27198   SDLoc DL(Op);
27199   // If the subtarget is not 64-bit, we may need the global base register
27200   // after the pseudo-instruction expansion that follows isel, i.e., after the
27201   // CGBR pass has run. Therefore, ask for the GlobalBaseReg now, so that the
27202   // pass inserts the code for us in case we need it.
27203   // Otherwise, we would end up referencing a virtual register that is never
27204   // defined!
27205   if (!Subtarget.is64Bit()) {
27206     const X86InstrInfo *TII = Subtarget.getInstrInfo();
27207     (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
27208   }
27209   return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
27210                      DAG.getVTList(MVT::i32, MVT::Other),
27211                      Op.getOperand(0), Op.getOperand(1));
27212 }
27213 
27214 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
27215                                                 SelectionDAG &DAG) const {
27216   SDLoc DL(Op);
27217   return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
27218                      Op.getOperand(0), Op.getOperand(1));
27219 }
27220 
27221 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
27222                                                        SelectionDAG &DAG) const {
27223   SDLoc DL(Op);
27224   return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
27225                      Op.getOperand(0));
27226 }
27227 
27228 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
27229   return Op.getOperand(0);
27230 }
27231 
27232 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
27233                                                 SelectionDAG &DAG) const {
27234   SDValue Root = Op.getOperand(0);
27235   SDValue Trmp = Op.getOperand(1); // trampoline
27236   SDValue FPtr = Op.getOperand(2); // nested function
27237   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
27238   SDLoc dl (Op);
27239 
27240   const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
27241   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
27242 
27243   if (Subtarget.is64Bit()) {
27244     SDValue OutChains[6];
27245 
27246     // Large code-model.
27247     const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
27248     const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
27249 
27250     const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
27251     const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
27252 
27253     const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
27254 
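          // Roughly, the stores below assemble this 23-byte trampoline (offsets
          // are relative to Trmp; the byte values follow from the constants
          // above):
          //    0: 49 BB <FPtr:imm64>   movabsq $FPtr, %r11
          //   10: 49 BA <Nest:imm64>   movabsq $Nest, %r10
          //   20: 49 FF E3             jmpq    *%r11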
27255     // Load the pointer to the nested function into R11.
27256     unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
27257     SDValue Addr = Trmp;
27258     OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27259                                 Addr, MachinePointerInfo(TrmpAddr));
27260 
27261     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27262                        DAG.getConstant(2, dl, MVT::i64));
27263     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
27264                                 MachinePointerInfo(TrmpAddr, 2), Align(2));
27265 
27266     // Load the 'nest' parameter value into R10.
27267     // R10 is specified in X86CallingConv.td
27268     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
27269     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27270                        DAG.getConstant(10, dl, MVT::i64));
27271     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27272                                 Addr, MachinePointerInfo(TrmpAddr, 10));
27273 
27274     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27275                        DAG.getConstant(12, dl, MVT::i64));
27276     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
27277                                 MachinePointerInfo(TrmpAddr, 12), Align(2));
27278 
27279     // Jump to the nested function.
27280     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
27281     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27282                        DAG.getConstant(20, dl, MVT::i64));
27283     OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27284                                 Addr, MachinePointerInfo(TrmpAddr, 20));
27285 
27286     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
27287     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27288                        DAG.getConstant(22, dl, MVT::i64));
27289     OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
27290                                 Addr, MachinePointerInfo(TrmpAddr, 22));
27291 
27292     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27293   } else {
27294     const Function *Func =
27295       cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
27296     CallingConv::ID CC = Func->getCallingConv();
27297     unsigned NestReg;
27298 
27299     switch (CC) {
27300     default:
27301       llvm_unreachable("Unsupported calling convention");
27302     case CallingConv::C:
27303     case CallingConv::X86_StdCall: {
27304       // Pass 'nest' parameter in ECX.
27305       // Must be kept in sync with X86CallingConv.td
27306       NestReg = X86::ECX;
27307 
27308       // Check that ECX wasn't needed by an 'inreg' parameter.
27309       FunctionType *FTy = Func->getFunctionType();
27310       const AttributeList &Attrs = Func->getAttributes();
27311 
27312       if (!Attrs.isEmpty() && !Func->isVarArg()) {
27313         unsigned InRegCount = 0;
27314         unsigned Idx = 0;
27315 
27316         for (FunctionType::param_iterator I = FTy->param_begin(),
27317              E = FTy->param_end(); I != E; ++I, ++Idx)
27318           if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
27319             const DataLayout &DL = DAG.getDataLayout();
27320             // FIXME: should only count parameters that are lowered to integers.
27321             InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
27322           }
27323 
27324         if (InRegCount > 2) {
27325           report_fatal_error("Nest register in use - reduce number of inreg"
27326                              " parameters!");
27327         }
27328       }
27329       break;
27330     }
27331     case CallingConv::X86_FastCall:
27332     case CallingConv::X86_ThisCall:
27333     case CallingConv::Fast:
27334     case CallingConv::Tail:
27335     case CallingConv::SwiftTail:
27336       // Pass 'nest' parameter in EAX.
27337       // Must be kept in sync with X86CallingConv.td
27338       NestReg = X86::EAX;
27339       break;
27340     }
27341 
27342     SDValue OutChains[4];
27343     SDValue Addr, Disp;
27344 
27345     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27346                        DAG.getConstant(10, dl, MVT::i32));
27347     Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
27348 
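          // Roughly, the 10-byte trampoline assembled below is:
          //   0: B8+reg <Nest:imm32>   movl $Nest, %eax/%ecx
          //   5: E9 <rel32>            jmp  FPtr
          // where rel32 = FPtr - (Trmp + 10), which is why Disp is computed
          // relative to Trmp + 10 above.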
27349     // This is storing the opcode for MOV32ri.
27350     const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
27351     const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
27352     OutChains[0] =
27353         DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
27354                      Trmp, MachinePointerInfo(TrmpAddr));
27355 
27356     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27357                        DAG.getConstant(1, dl, MVT::i32));
27358     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
27359                                 MachinePointerInfo(TrmpAddr, 1), Align(1));
27360 
27361     const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
27362     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27363                        DAG.getConstant(5, dl, MVT::i32));
27364     OutChains[2] =
27365         DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
27366                      MachinePointerInfo(TrmpAddr, 5), Align(1));
27367 
27368     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27369                        DAG.getConstant(6, dl, MVT::i32));
27370     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
27371                                 MachinePointerInfo(TrmpAddr, 6), Align(1));
27372 
27373     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27374   }
27375 }
27376 
27377 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
27378                                              SelectionDAG &DAG) const {
27379   /*
27380    The rounding mode is in bits 11:10 of the FP Control Word (FPCW), and
27381    has the following settings:
27382      00 Round to nearest
27383      01 Round to -inf
27384      10 Round to +inf
27385      11 Round to 0
27386 
27387   GET_ROUNDING, on the other hand, expects the following:
27388     -1 Undefined
27389      0 Round to 0
27390      1 Round to nearest
27391      2 Round to +inf
27392      3 Round to -inf
27393 
27394   To perform the conversion, we use a packed lookup table of the four 2-bit
27395   values that we can index by FPCW[11:10]
27396     0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPCW[11:10]
27397 
27398     (0x2d >> ((FPCW & 0xc00) >> 9)) & 3
27399   */
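        // For example, with FPCW[11:10] = 01 (round toward -inf) the shift amount
        // is (0x400 >> 9) = 2 and (0x2d >> 2) & 3 = 3, GET_ROUNDING's "round to
        // -inf" value; with FPCW[11:10] = 00 the result is 0x2d & 3 = 1, i.e.
        // "round to nearest".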
27400 
27401   MachineFunction &MF = DAG.getMachineFunction();
27402   MVT VT = Op.getSimpleValueType();
27403   SDLoc DL(Op);
27404 
27405   // Save FP Control Word to stack slot
27406   int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
27407   SDValue StackSlot =
27408       DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
27409 
27410   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
27411 
27412   SDValue Chain = Op.getOperand(0);
27413   SDValue Ops[] = {Chain, StackSlot};
27414   Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
27415                                   DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
27416                                   Align(2), MachineMemOperand::MOStore);
27417 
27418   // Load FP Control Word from stack slot
27419   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
27420   Chain = CWD.getValue(1);
27421 
27422   // Mask and turn the control bits into a shift for the lookup table.
27423   SDValue Shift =
27424     DAG.getNode(ISD::SRL, DL, MVT::i16,
27425                 DAG.getNode(ISD::AND, DL, MVT::i16,
27426                             CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
27427                 DAG.getConstant(9, DL, MVT::i8));
27428   Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
27429 
27430   SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
27431   SDValue RetVal =
27432     DAG.getNode(ISD::AND, DL, MVT::i32,
27433                 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
27434                 DAG.getConstant(3, DL, MVT::i32));
27435 
27436   RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
27437 
27438   return DAG.getMergeValues({RetVal, Chain}, DL);
27439 }
27440 
27441 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
27442                                              SelectionDAG &DAG) const {
27443   MachineFunction &MF = DAG.getMachineFunction();
27444   SDLoc DL(Op);
27445   SDValue Chain = Op.getNode()->getOperand(0);
27446 
27447   // The FP control word can only be set from data in memory, so we need to
27448   // allocate stack space to save/load it.
27449   int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
27450   SDValue StackSlot =
27451       DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
27452   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
27453   MachineMemOperand *MMO =
27454       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
27455 
27456   // Store FP control word into memory.
27457   SDValue Ops[] = {Chain, StackSlot};
27458   Chain = DAG.getMemIntrinsicNode(
27459       X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
27460 
27461   // Load FP Control Word from stack slot and clear RM field (bits 11:10).
27462   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
27463   Chain = CWD.getValue(1);
27464   CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
27465                     DAG.getConstant(0xf3ff, DL, MVT::i16));
27466 
27467   // Calculate new rounding mode.
27468   SDValue NewRM = Op.getNode()->getOperand(1);
27469   SDValue RMBits;
27470   if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
27471     uint64_t RM = CVal->getZExtValue();
27472     int FieldVal;
27473     switch (static_cast<RoundingMode>(RM)) {
27474     case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
27475     case RoundingMode::TowardNegative:    FieldVal = X86::rmDownward; break;
27476     case RoundingMode::TowardPositive:    FieldVal = X86::rmUpward; break;
27477     case RoundingMode::TowardZero:        FieldVal = X86::rmTowardZero; break;
27478     default:
27479       llvm_unreachable("rounding mode is not supported by X86 hardware");
27480     }
27481     RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
27482   } else {
27483     // Need to convert argument into bits of control word:
27484     //    0 Round to 0       -> 11
27485     //    1 Round to nearest -> 00
27486     //    2 Round to +inf    -> 10
27487     //    3 Round to -inf    -> 01
27488     // The 2-bit value then needs to be shifted so that it occupies bits 11:10.
27489     // To perform the conversion, pack all of these 2-bit fields into the
27490     // constant 0xc9 and shift it left depending on the rounding mode:
27491     //    (0xc9 << 4) & 0xc00 = X86::rmTowardZero
27492     //    (0xc9 << 6) & 0xc00 = X86::rmToNearest
27493     //    ...
27494     // (0xc9 << (2 * NewRM + 4)) & 0xc00
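          // For example, for NewRM = 2 (round to +inf) the shift amount is
          // 2 * 2 + 4 = 8, and (0xc9 << 8) & 0xc00 = 0x800, i.e. bits 11:10 set
          // to 10, the +inf encoding (X86::rmUpward).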
27495     SDValue ShiftValue =
27496         DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
27497                     DAG.getNode(ISD::ADD, DL, MVT::i32,
27498                                 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
27499                                             DAG.getConstant(1, DL, MVT::i8)),
27500                                 DAG.getConstant(4, DL, MVT::i32)));
27501     SDValue Shifted =
27502         DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
27503                     ShiftValue);
27504     RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
27505                          DAG.getConstant(0xc00, DL, MVT::i16));
27506   }
27507 
27508   // Update rounding mode bits and store the new FP Control Word into stack.
27509   CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
27510   Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
27511 
27512   // Load FP control word from the slot.
27513   SDValue OpsLD[] = {Chain, StackSlot};
27514   MachineMemOperand *MMOL =
27515       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
27516   Chain = DAG.getMemIntrinsicNode(
27517       X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
27518 
27519   // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
27520   // same way but in bits 14:13.
27521   if (Subtarget.hasSSE1()) {
27522     // Store MXCSR into memory.
27523     Chain = DAG.getNode(
27524         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27525         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27526         StackSlot);
27527 
27528     // Load MXCSR from stack slot and clear RM field (bits 14:13).
27529     SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
27530     Chain = CWD.getValue(1);
27531     CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
27532                       DAG.getConstant(0xffff9fff, DL, MVT::i32));
27533 
27534     // Shift X87 RM bits from 11:10 to 14:13.
27535     RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
27536     RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
27537                          DAG.getConstant(3, DL, MVT::i8));
27538 
27539     // Update rounding mode bits and store the new FP Control Word into stack.
27540     CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
27541     Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
27542 
27543     // Load MXCSR from the slot.
27544     Chain = DAG.getNode(
27545         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27546         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27547         StackSlot);
27548   }
27549 
27550   return Chain;
27551 }
27552 
27553 const unsigned X87StateSize = 28;
27554 const unsigned FPStateSize = 32;
27555 [[maybe_unused]] const unsigned FPStateSizeInBits = FPStateSize * 8;
27556 
27557 SDValue X86TargetLowering::LowerGET_FPENV_MEM(SDValue Op,
27558                                               SelectionDAG &DAG) const {
27559   MachineFunction &MF = DAG.getMachineFunction();
27560   SDLoc DL(Op);
27561   SDValue Chain = Op->getOperand(0);
27562   SDValue Ptr = Op->getOperand(1);
27563   auto *Node = cast<FPStateAccessSDNode>(Op);
27564   EVT MemVT = Node->getMemoryVT();
27565   assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27566   MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27567 
27568   // Get x87 state, if it presents.
27569   if (Subtarget.hasX87()) {
27570     Chain =
27571         DAG.getMemIntrinsicNode(X86ISD::FNSTENVm, DL, DAG.getVTList(MVT::Other),
27572                                 {Chain, Ptr}, MemVT, MMO);
27573 
27574     // FNSTENV changes the exception mask, so load back the stored environment.
27575     MachineMemOperand::Flags NewFlags =
27576         MachineMemOperand::MOLoad |
27577         (MMO->getFlags() & ~MachineMemOperand::MOStore);
27578     MMO = MF.getMachineMemOperand(MMO, NewFlags);
27579     Chain =
27580         DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27581                                 {Chain, Ptr}, MemVT, MMO);
27582   }
27583 
27584   // If target supports SSE, get MXCSR as well.
27585   if (Subtarget.hasSSE1()) {
27586     // Get pointer to the MXCSR location in memory.
27587     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27588     SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27589                                     DAG.getConstant(X87StateSize, DL, PtrVT));
27590     // Store MXCSR into memory.
27591     Chain = DAG.getNode(
27592         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27593         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27594         MXCSRAddr);
27595   }
27596 
27597   return Chain;
27598 }
27599 
27600 static SDValue createSetFPEnvNodes(SDValue Ptr, SDValue Chain, SDLoc DL,
27601                                    EVT MemVT, MachineMemOperand *MMO,
27602                                    SelectionDAG &DAG,
27603                                    const X86Subtarget &Subtarget) {
27604   // Set x87 state, if it presents.
27605   if (Subtarget.hasX87())
27606     Chain =
27607         DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27608                                 {Chain, Ptr}, MemVT, MMO);
27609   // If target supports SSE, set MXCSR as well.
27610   if (Subtarget.hasSSE1()) {
27611     // Get pointer to the MXCSR location in memory.
27612     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27613     SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27614                                     DAG.getConstant(X87StateSize, DL, PtrVT));
27615     // Load MXCSR from memory.
27616     Chain = DAG.getNode(
27617         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27618         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27619         MXCSRAddr);
27620   }
27621   return Chain;
27622 }
27623 
27624 SDValue X86TargetLowering::LowerSET_FPENV_MEM(SDValue Op,
27625                                               SelectionDAG &DAG) const {
27626   SDLoc DL(Op);
27627   SDValue Chain = Op->getOperand(0);
27628   SDValue Ptr = Op->getOperand(1);
27629   auto *Node = cast<FPStateAccessSDNode>(Op);
27630   EVT MemVT = Node->getMemoryVT();
27631   assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27632   MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27633   return createSetFPEnvNodes(Ptr, Chain, DL, MemVT, MMO, DAG, Subtarget);
27634 }
27635 
27636 SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op,
27637                                             SelectionDAG &DAG) const {
27638   MachineFunction &MF = DAG.getMachineFunction();
27639   SDLoc DL(Op);
27640   SDValue Chain = Op.getNode()->getOperand(0);
27641 
27642   IntegerType *ItemTy = Type::getInt32Ty(*DAG.getContext());
27643   ArrayType *FPEnvTy = ArrayType::get(ItemTy, 8);
27644   SmallVector<Constant *, 8> FPEnvVals;
27645 
27646   // x87 FPU Control Word: mask all floating-point exceptions, sets rounding to
27647   // nearest. FPU precision is set to 53 bits on Windows and 64 bits otherwise
27648   // for compatibility with glibc.
27649   unsigned X87CW = Subtarget.isTargetWindowsMSVC() ? 0x27F : 0x37F;
27650   FPEnvVals.push_back(ConstantInt::get(ItemTy, X87CW));
27651   Constant *Zero = ConstantInt::get(ItemTy, 0);
27652   for (unsigned I = 0; I < 6; ++I)
27653     FPEnvVals.push_back(Zero);
27654 
27655   // MXCSR: mask all floating-point exceptions, sets rounding to nearest, clear
27656   // all exceptions, sets DAZ and FTZ to 0.
27657   FPEnvVals.push_back(ConstantInt::get(ItemTy, 0x1F80));
27658   Constant *FPEnvBits = ConstantArray::get(FPEnvTy, FPEnvVals);
27659   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27660   SDValue Env = DAG.getConstantPool(FPEnvBits, PtrVT);
27661   MachinePointerInfo MPI =
27662       MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
27663   MachineMemOperand *MMO = MF.getMachineMemOperand(
27664       MPI, MachineMemOperand::MOStore, X87StateSize, Align(4));
27665 
27666   return createSetFPEnvNodes(Env, Chain, DL, MVT::i32, MMO, DAG, Subtarget);
27667 }
27668 
27669 /// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
27670 //
27671 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
27672 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
27673 // split the vector, perform the operation on its Lo and Hi parts, and
27674 // concatenate the results.
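      // For example, a v16i8 ctlz is computed by zero-extending to v16i32, running
      // vplzcntd, truncating, and subtracting 32 - 8 = 24 (e.g. for the i8 element
      // 0x10, lzcnt of 0x00000010 is 27 and 27 - 24 = 3 = ctlz).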
27675 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
27676                                          const X86Subtarget &Subtarget) {
27677   assert(Op.getOpcode() == ISD::CTLZ);
27678   SDLoc dl(Op);
27679   MVT VT = Op.getSimpleValueType();
27680   MVT EltVT = VT.getVectorElementType();
27681   unsigned NumElems = VT.getVectorNumElements();
27682 
27683   assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
27684           "Unsupported element type");
27685 
27686   // Split the vector; its Lo and Hi parts will be handled in the next iteration.
27687   if (NumElems > 16 ||
27688       (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
27689     return splitVectorIntUnary(Op, DAG);
27690 
27691   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27692   assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
27693           "Unsupported value type for operation");
27694 
27695   // Use native supported vector instruction vplzcntd.
27696   Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
27697   SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
27698   SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
27699   SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
27700 
27701   return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
27702 }
27703 
27704 // Lower CTLZ using a PSHUFB lookup table implementation.
27705 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
27706                                        const X86Subtarget &Subtarget,
27707                                        SelectionDAG &DAG) {
27708   MVT VT = Op.getSimpleValueType();
27709   int NumElts = VT.getVectorNumElements();
27710   int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
27711   MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
27712 
27713   // Per-nibble leading zero PSHUFB lookup table.
27714   const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
27715                        /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
27716                        /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
27717                        /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
27718 
27719   SmallVector<SDValue, 64> LUTVec;
27720   for (int i = 0; i < NumBytes; ++i)
27721     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27722   SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
27723 
27724   // Begin by bitcasting the input to a byte vector, then split those bytes
27725   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
27726   // If the hi input nibble is zero then we add both results together, otherwise
27727   // we just take the hi result (by masking the lo result to zero before the
27728   // add).
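        // For example, for the byte 0x1a the hi nibble 0x1 maps to 3 and the lo
        // nibble 0xa maps to 0; since the hi nibble is non-zero, the lo result is
        // masked to zero and the final count is 3, which is ctlz(0x1a) for an i8.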
27729   SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
27730   SDValue Zero = DAG.getConstant(0, DL, CurrVT);
27731 
27732   SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
27733   SDValue Lo = Op0;
27734   SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
27735   SDValue HiZ;
27736   if (CurrVT.is512BitVector()) {
27737     MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27738     HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
27739     HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27740   } else {
27741     HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
27742   }
27743 
27744   Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
27745   Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
27746   Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
27747   SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
27748 
27749   // Merge result back from vXi8 back to VT, working on the lo/hi halves
27750   // of the current vector width in the same way we did for the nibbles.
27751   // If the upper half of the input element is zero then add the halves'
27752   // leading zero counts together, otherwise just use the upper half's.
27753   // Double the width of the result until we are at target width.
27754   while (CurrVT != VT) {
27755     int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
27756     int CurrNumElts = CurrVT.getVectorNumElements();
27757     MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
27758     MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
27759     SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
27760 
27761     // Check if the upper half of the input element is zero.
27762     if (CurrVT.is512BitVector()) {
27763       MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27764       HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
27765                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27766       HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27767     } else {
27768       HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
27769                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27770     }
27771     HiZ = DAG.getBitcast(NextVT, HiZ);
27772 
27773     // Move the upper/lower halves to the lower bits as we'll be extending to
27774     // NextVT. Mask the lower result to zero if HiZ is true and add the results
27775     // together.
27776     SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
27777     SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
27778     SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
27779     R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
27780     Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
27781     CurrVT = NextVT;
27782   }
27783 
27784   return Res;
27785 }
27786 
27787 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
27788                                const X86Subtarget &Subtarget,
27789                                SelectionDAG &DAG) {
27790   MVT VT = Op.getSimpleValueType();
27791 
27792   if (Subtarget.hasCDI() &&
27793       // vXi8 vectors need to be promoted to 512-bit vXi32 vectors.
27794       (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
27795     return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
27796 
27797   // Decompose 256-bit ops into smaller 128-bit ops.
27798   if (VT.is256BitVector() && !Subtarget.hasInt256())
27799     return splitVectorIntUnary(Op, DAG);
27800 
27801   // Decompose 512-bit ops into smaller 256-bit ops.
27802   if (VT.is512BitVector() && !Subtarget.hasBWI())
27803     return splitVectorIntUnary(Op, DAG);
27804 
27805   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
27806   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
27807 }
27808 
27809 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
27810                          SelectionDAG &DAG) {
27811   MVT VT = Op.getSimpleValueType();
27812   MVT OpVT = VT;
27813   unsigned NumBits = VT.getSizeInBits();
27814   SDLoc dl(Op);
27815   unsigned Opc = Op.getOpcode();
27816 
27817   if (VT.isVector())
27818     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
27819 
27820   Op = Op.getOperand(0);
27821   if (VT == MVT::i8) {
27822     // Zero extend to i32 since there is no i8 bsr.
27823     OpVT = MVT::i32;
27824     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
27825   }
27826 
27827   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
27828   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
27829   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
27830 
27831   if (Opc == ISD::CTLZ) {
27832     // If src is zero (i.e. bsr sets ZF), returns NumBits.
27833     SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
27834                      DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27835                      Op.getValue(1)};
27836     Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
27837   }
27838 
27839   // Finally xor with NumBits-1.
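        // (BSR returns the index of the highest set bit, so e.g. for a 32-bit
        // input 0x00010000 it yields 16 and 16 ^ 31 = 15 = ctlz. In the zero case
        // the CMOV above selected 2*NumBits-1, and (2*NumBits-1) ^ (NumBits-1) is
        // exactly NumBits.)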
27840   Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
27841                    DAG.getConstant(NumBits - 1, dl, OpVT));
27842 
27843   if (VT == MVT::i8)
27844     Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
27845   return Op;
27846 }
27847 
27848 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
27849                          SelectionDAG &DAG) {
27850   MVT VT = Op.getSimpleValueType();
27851   unsigned NumBits = VT.getScalarSizeInBits();
27852   SDValue N0 = Op.getOperand(0);
27853   SDLoc dl(Op);
27854 
27855   assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
27856          "Only scalar CTTZ requires custom lowering");
27857 
27858   // Issue a bsf (scan bits forward) which also sets EFLAGS.
27859   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
27860   Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
27861 
27862   // If src is known never zero we can skip the CMOV.
27863   if (DAG.isKnownNeverZero(N0))
27864     return Op;
27865 
27866   // If src is zero (i.e. bsf sets ZF), returns NumBits.
27867   SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
27868                    DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27869                    Op.getValue(1)};
27870   return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
27871 }
27872 
27873 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
27874                            const X86Subtarget &Subtarget) {
27875   MVT VT = Op.getSimpleValueType();
27876   if (VT == MVT::i16 || VT == MVT::i32)
27877     return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
27878 
27879   if (VT == MVT::v32i16 || VT == MVT::v64i8)
27880     return splitVectorIntBinary(Op, DAG);
27881 
27882   assert(Op.getSimpleValueType().is256BitVector() &&
27883          Op.getSimpleValueType().isInteger() &&
27884          "Only handle AVX 256-bit vector integer operation");
27885   return splitVectorIntBinary(Op, DAG);
27886 }
27887 
27888 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
27889                                   const X86Subtarget &Subtarget) {
27890   MVT VT = Op.getSimpleValueType();
27891   SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
27892   unsigned Opcode = Op.getOpcode();
27893   SDLoc DL(Op);
27894 
27895   if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
27896       (VT.is256BitVector() && !Subtarget.hasInt256())) {
27897     assert(Op.getSimpleValueType().isInteger() &&
27898            "Only handle AVX vector integer operation");
27899     return splitVectorIntBinary(Op, DAG);
27900   }
27901 
27902   // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
27903   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27904   EVT SetCCResultType =
27905       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
27906 
27907   unsigned BitWidth = VT.getScalarSizeInBits();
27908   if (Opcode == ISD::USUBSAT) {
27909     if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
27910       // Handle a special case with a bit-hack instead of cmp+select:
27911       // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
27912       // If the target can use VPTERNLOG, DAGToDAG will match this as
27913       // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
27914       // "broadcast" constant load.
27915       ConstantSDNode *C = isConstOrConstSplat(Y, true);
27916       if (C && C->getAPIntValue().isSignMask()) {
27917         SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
27918         SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
27919         SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
27920         SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
27921         return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
27922       }
27923     }
27924     if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
27925       // usubsat X, Y --> (X >u Y) ? X - Y : 0
27926       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
27927       SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
27928       // TODO: Move this to DAGCombiner?
27929       if (SetCCResultType == VT &&
27930           DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
27931         return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
27932       return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
27933     }
27934   }
27935 
27936   if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
27937       (!VT.isVector() || VT == MVT::v2i64)) {
27938     APInt MinVal = APInt::getSignedMinValue(BitWidth);
27939     APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
27940     SDValue Zero = DAG.getConstant(0, DL, VT);
27941     SDValue Result =
27942         DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
27943                     DAG.getVTList(VT, SetCCResultType), X, Y);
27944     SDValue SumDiff = Result.getValue(0);
27945     SDValue Overflow = Result.getValue(1);
27946     SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
27947     SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
27948     SDValue SumNeg =
27949         DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
27950     Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
27951     return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
27952   }
27953 
27954   // Use default expansion.
27955   return SDValue();
27956 }
27957 
27958 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
27959                         SelectionDAG &DAG) {
27960   MVT VT = Op.getSimpleValueType();
27961   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
27962     // Since X86 does not have CMOV for 8-bit integer, we don't convert
27963     // 8-bit integer abs to NEG and CMOV.
27964     SDLoc DL(Op);
27965     SDValue N0 = Op.getOperand(0);
27966     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
27967                               DAG.getConstant(0, DL, VT), N0);
27968     SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
27969                      SDValue(Neg.getNode(), 1)};
27970     return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
27971   }
27972 
27973   // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
27974   if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
27975     SDLoc DL(Op);
27976     SDValue Src = Op.getOperand(0);
27977     SDValue Sub =
27978         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
27979     return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
27980   }
27981 
27982   if (VT.is256BitVector() && !Subtarget.hasInt256()) {
27983     assert(VT.isInteger() &&
27984            "Only handle AVX 256-bit vector integer operation");
27985     return splitVectorIntUnary(Op, DAG);
27986   }
27987 
27988   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
27989     return splitVectorIntUnary(Op, DAG);
27990 
27991   // Default to expand.
27992   return SDValue();
27993 }
27994 
27995 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
27996                         SelectionDAG &DAG) {
27997   MVT VT = Op.getSimpleValueType();
27998 
27999   // For AVX1 cases, split to use legal ops.
28000   if (VT.is256BitVector() && !Subtarget.hasInt256())
28001     return splitVectorIntBinary(Op, DAG);
28002 
28003   if (VT == MVT::v32i16 || VT == MVT::v64i8)
28004     return splitVectorIntBinary(Op, DAG);
28005 
28006   // Default to expand.
28007   return SDValue();
28008 }
28009 
28010 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
28011                            SelectionDAG &DAG) {
28012   MVT VT = Op.getSimpleValueType();
28013 
28014   // For AVX1 cases, split to use legal ops.
28015   if (VT.is256BitVector() && !Subtarget.hasInt256())
28016     return splitVectorIntBinary(Op, DAG);
28017 
28018   if (VT == MVT::v32i16 || VT == MVT::v64i8)
28019     return splitVectorIntBinary(Op, DAG);
28020 
28021   // Default to expand.
28022   return SDValue();
28023 }
28024 
28025 static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
28026                                       SelectionDAG &DAG) {
28027   assert((Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMINIMUM) &&
28028          "Expected FMAXIMUM or FMINIMUM opcode");
28029   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28030   EVT VT = Op.getValueType();
28031   SDValue X = Op.getOperand(0);
28032   SDValue Y = Op.getOperand(1);
28033   SDLoc DL(Op);
28034   uint64_t SizeInBits = VT.getScalarSizeInBits();
28035   APInt PreferredZero = APInt::getZero(SizeInBits);
28036   APInt OppositeZero = PreferredZero;
28037   EVT IVT = VT.changeTypeToInteger();
28038   X86ISD::NodeType MinMaxOp;
28039   if (Op.getOpcode() == ISD::FMAXIMUM) {
28040     MinMaxOp = X86ISD::FMAX;
28041     OppositeZero.setSignBit();
28042   } else {
28043     PreferredZero.setSignBit();
28044     MinMaxOp = X86ISD::FMIN;
28045   }
28046   EVT SetCCType =
28047       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28048 
28049   // The tables below show the expected result of Max in cases of NaN and
28050   // signed zeros.
28051   //
28052   //                 Y                       Y
28053   //             Num   xNaN              +0     -0
28054   //          ---------------         ---------------
28055   //     Num  |  Max |   Y  |     +0  |  +0  |  +0  |
28056   // X        ---------------  X      ---------------
28057   //    xNaN  |   X  |  X/Y |     -0  |  +0  |  -0  |
28058   //          ---------------         ---------------
28059   //
28060   // It is achieved by means of FMAX/FMIN with preliminary checks and operand
28061   // reordering.
28062   //
28063   // We check if either operand is NaN and return NaN. Then we check if either
28064   // operand is zero or negative zero (for fmaximum and fminimum respectively)
28065   // to ensure the correct zero is returned.
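        // Note that the underlying X86ISD::FMAX/FMIN (maxps/minps and friends)
        // return the second source operand both when either input is NaN and
        // when both inputs are zeros, so the operand order chosen below decides
        // which NaN or which zero survives.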
28066   auto MatchesZero = [](SDValue Op, APInt Zero) {
28067     Op = peekThroughBitcasts(Op);
28068     if (auto *CstOp = dyn_cast<ConstantFPSDNode>(Op))
28069       return CstOp->getValueAPF().bitcastToAPInt() == Zero;
28070     if (auto *CstOp = dyn_cast<ConstantSDNode>(Op))
28071       return CstOp->getAPIntValue() == Zero;
28072     if (Op->getOpcode() == ISD::BUILD_VECTOR ||
28073         Op->getOpcode() == ISD::SPLAT_VECTOR) {
28074       for (const SDValue &OpVal : Op->op_values()) {
28075         if (OpVal.isUndef())
28076           continue;
28077         auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
28078         if (!CstOp)
28079           return false;
28080         if (!CstOp->getValueAPF().isZero())
28081           continue;
28082         if (CstOp->getValueAPF().bitcastToAPInt() != Zero)
28083           return false;
28084       }
28085       return true;
28086     }
28087     return false;
28088   };
28089 
28090   bool IsXNeverNaN = DAG.isKnownNeverNaN(X);
28091   bool IsYNeverNaN = DAG.isKnownNeverNaN(Y);
28092   bool IgnoreSignedZero = DAG.getTarget().Options.NoSignedZerosFPMath ||
28093                           Op->getFlags().hasNoSignedZeros() ||
28094                           DAG.isKnownNeverZeroFloat(X) ||
28095                           DAG.isKnownNeverZeroFloat(Y);
28096   SDValue NewX, NewY;
28097   if (IgnoreSignedZero || MatchesZero(Y, PreferredZero) ||
28098       MatchesZero(X, OppositeZero)) {
28099     // Operands are already in right order or order does not matter.
28100     NewX = X;
28101     NewY = Y;
28102   } else if (MatchesZero(X, PreferredZero) || MatchesZero(Y, OppositeZero)) {
28103     NewX = Y;
28104     NewY = X;
28105   } else if (!VT.isVector() && (VT == MVT::f16 || Subtarget.hasDQI()) &&
28106              (Op->getFlags().hasNoNaNs() || IsXNeverNaN || IsYNeverNaN)) {
28107     if (IsXNeverNaN)
28108       std::swap(X, Y);
28109     // VFPCLASSS consumes a vector type, so provide a minimal vector type
28110     // corresponding to an xmm register.
28111     MVT VectorType = MVT::getVectorVT(VT.getSimpleVT(), 128 / SizeInBits);
28112     SDValue VX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorType, X);
28113     // Bits of classes:
28114     // Bits  Imm8[0] Imm8[1] Imm8[2] Imm8[3] Imm8[4]  Imm8[5]  Imm8[6] Imm8[7]
28115     // Class    QNAN PosZero NegZero  PosINF  NegINF Denormal Negative    SNAN
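          // Imm 0b011 selects {QNaN, PosZero} for FMAXIMUM and 0b101 selects
          // {QNaN, NegZero} for FMINIMUM, i.e. exactly the values of X that
          // must end up as the second operand of the max/min below.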
28116     SDValue Imm = DAG.getTargetConstant(MinMaxOp == X86ISD::FMAX ? 0b11 : 0b101,
28117                                         DL, MVT::i32);
28118     SDValue IsNanZero = DAG.getNode(X86ISD::VFPCLASSS, DL, MVT::v1i1, VX, Imm);
28119     SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
28120                               DAG.getConstant(0, DL, MVT::v8i1), IsNanZero,
28121                               DAG.getIntPtrConstant(0, DL));
28122     SDValue NeedSwap = DAG.getBitcast(MVT::i8, Ins);
28123     NewX = DAG.getSelect(DL, VT, NeedSwap, Y, X);
28124     NewY = DAG.getSelect(DL, VT, NeedSwap, X, Y);
28125     return DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28126   } else {
28127     SDValue IsXSigned;
28128     if (Subtarget.is64Bit() || VT != MVT::f64) {
28129       SDValue XInt = DAG.getNode(ISD::BITCAST, DL, IVT, X);
28130       SDValue ZeroCst = DAG.getConstant(0, DL, IVT);
28131       IsXSigned = DAG.getSetCC(DL, SetCCType, XInt, ZeroCst, ISD::SETLT);
28132     } else {
28133       assert(VT == MVT::f64);
28134       SDValue Ins = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v2f64,
28135                                 DAG.getConstantFP(0, DL, MVT::v2f64), X,
28136                                 DAG.getIntPtrConstant(0, DL));
28137       SDValue VX = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, Ins);
28138       SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VX,
28139                                DAG.getIntPtrConstant(1, DL));
28140       Hi = DAG.getBitcast(MVT::i32, Hi);
28141       SDValue ZeroCst = DAG.getConstant(0, DL, MVT::i32);
28142       EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(),
28143                                              *DAG.getContext(), MVT::i32);
28144       IsXSigned = DAG.getSetCC(DL, SetCCType, Hi, ZeroCst, ISD::SETLT);
28145     }
28146     if (MinMaxOp == X86ISD::FMAX) {
28147       NewX = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28148       NewY = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28149     } else {
28150       NewX = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28151       NewY = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28152     }
28153   }
28154 
28155   bool IgnoreNaN = DAG.getTarget().Options.NoNaNsFPMath ||
28156                    Op->getFlags().hasNoNaNs() || (IsXNeverNaN && IsYNeverNaN);
28157 
28158   // If the operands were not reordered for signed-zero handling, NaN still
28159   // needs handling, and the second operand is known never to be NaN, put it
28160   // first so that no NaN fixup is needed after the max/min.
28161   if (IgnoreSignedZero && !IgnoreNaN && DAG.isKnownNeverNaN(NewY))
28162     std::swap(NewX, NewY);
28163 
28164   SDValue MinMax = DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28165 
28166   if (IgnoreNaN || DAG.isKnownNeverNaN(NewX))
28167     return MinMax;
28168 
28169   SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NewX, NewX, ISD::SETUO);
28170   return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax);
28171 }
28172 
28173 static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
28174                         SelectionDAG &DAG) {
28175   MVT VT = Op.getSimpleValueType();
28176 
28177   // For AVX1 cases, split to use legal ops.
28178   if (VT.is256BitVector() && !Subtarget.hasInt256())
28179     return splitVectorIntBinary(Op, DAG);
28180 
28181   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.useBWIRegs())
28182     return splitVectorIntBinary(Op, DAG);
28183 
28184   SDLoc dl(Op);
28185   bool IsSigned = Op.getOpcode() == ISD::ABDS;
28186   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28187 
28188   // TODO: Move to TargetLowering expandABD() once we have ABD promotion.
28189   if (VT.isScalarInteger()) {
28190     unsigned WideBits = std::max<unsigned>(2 * VT.getScalarSizeInBits(), 32u);
28191     MVT WideVT = MVT::getIntegerVT(WideBits);
28192     if (TLI.isTypeLegal(WideVT)) {
28193       // abds(lhs, rhs) -> trunc(abs(sub(sext(lhs), sext(rhs))))
28194       // abdu(lhs, rhs) -> trunc(abs(sub(zext(lhs), zext(rhs))))
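            // e.g. for i8 abdu(10, 250): zext to i32, 10 - 250 = -240,
            // abs = 240, truncate back to i8 = 240 (the unsigned absolute
            // difference).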
28195       unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28196       SDValue LHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(0));
28197       SDValue RHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(1));
28198       SDValue Diff = DAG.getNode(ISD::SUB, dl, WideVT, LHS, RHS);
28199       SDValue AbsDiff = DAG.getNode(ISD::ABS, dl, WideVT, Diff);
28200       return DAG.getNode(ISD::TRUNCATE, dl, VT, AbsDiff);
28201     }
28202   }
28203 
28204   // TODO: Move to TargetLowering expandABD().
28205   if (!Subtarget.hasSSE41() &&
28206       ((IsSigned && VT == MVT::v16i8) || VT == MVT::v4i32)) {
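          // abd(x, y) --> (x > y) ? x - y : y - x, implemented with a
          // bit-select on the all-ones/all-zeros compare mask since there is
          // no vector blend before SSE41.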
28207     SDValue LHS = DAG.getFreeze(Op.getOperand(0));
28208     SDValue RHS = DAG.getFreeze(Op.getOperand(1));
28209     ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
28210     SDValue Cmp = DAG.getSetCC(dl, VT, LHS, RHS, CC);
28211     SDValue Diff0 = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
28212     SDValue Diff1 = DAG.getNode(ISD::SUB, dl, VT, RHS, LHS);
28213     return getBitSelect(dl, VT, Diff0, Diff1, Cmp, DAG);
28214   }
28215 
28216   // Default to expand.
28217   return SDValue();
28218 }
28219 
28220 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
28221                         SelectionDAG &DAG) {
28222   SDLoc dl(Op);
28223   MVT VT = Op.getSimpleValueType();
28224 
28225   // Decompose 256-bit ops into 128-bit ops.
28226   if (VT.is256BitVector() && !Subtarget.hasInt256())
28227     return splitVectorIntBinary(Op, DAG);
28228 
28229   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28230     return splitVectorIntBinary(Op, DAG);
28231 
28232   SDValue A = Op.getOperand(0);
28233   SDValue B = Op.getOperand(1);
28234 
28235   // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
28236   // vector pairs, multiply and truncate.
28237   if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
28238     unsigned NumElts = VT.getVectorNumElements();
28239 
28240     if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28241         (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28242       MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28243       return DAG.getNode(
28244           ISD::TRUNCATE, dl, VT,
28245           DAG.getNode(ISD::MUL, dl, ExVT,
28246                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
28247                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
28248     }
28249 
28250     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28251 
28252     // Extract the lo/hi parts and any-extend them to i16.
28253     // We're going to mask off the low byte of each result element of the
28254     // pmullw, so it doesn't matter what's in the high byte of each 16-bit
28255     // element.
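          // Only the low byte of each 16-bit product is needed: an i8 multiply
          // is just the full product modulo 256.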
28256     SDValue Undef = DAG.getUNDEF(VT);
28257     SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
28258     SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
28259 
28260     SDValue BLo, BHi;
28261     if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28262       // If the RHS is a constant, manually unpackl/unpackh.
28263       SmallVector<SDValue, 16> LoOps, HiOps;
28264       for (unsigned i = 0; i != NumElts; i += 16) {
28265         for (unsigned j = 0; j != 8; ++j) {
28266           LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
28267                                                MVT::i16));
28268           HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
28269                                                MVT::i16));
28270         }
28271       }
28272 
28273       BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28274       BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28275     } else {
28276       BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
28277       BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
28278     }
28279 
28280     // Multiply, mask the lower 8 bits of the lo/hi results and pack.
28281     SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
28282     SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
28283     return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28284   }
28285 
28286   // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
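        // pmuludq multiplies the even i32 lanes into i64 products whose low
        // 32 bits are the i32 products (mod 2^32), so two pmuludqs (even lanes,
        // then the odd lanes moved into even positions) plus a final shuffle
        // cover all four lanes.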
28287   if (VT == MVT::v4i32) {
28288     assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
28289            "Should not custom lower when pmulld is available!");
28290 
28291     // Extract the odd parts.
28292     static const int UnpackMask[] = { 1, -1, 3, -1 };
28293     SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
28294     SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
28295 
28296     // Multiply the even parts.
28297     SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28298                                 DAG.getBitcast(MVT::v2i64, A),
28299                                 DAG.getBitcast(MVT::v2i64, B));
28300     // Now multiply odd parts.
28301     SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28302                                DAG.getBitcast(MVT::v2i64, Aodds),
28303                                DAG.getBitcast(MVT::v2i64, Bodds));
28304 
28305     Evens = DAG.getBitcast(VT, Evens);
28306     Odds = DAG.getBitcast(VT, Odds);
28307 
28308     // Merge the two vectors back together with a shuffle. This expands into 2
28309     // shuffles.
28310     static const int ShufMask[] = { 0, 4, 2, 6 };
28311     return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
28312   }
28313 
28314   assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
28315          "Only know how to lower V2I64/V4I64/V8I64 multiply");
28316   assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
28317 
28318   //  Ahi = psrlqi(a, 32);
28319   //  Bhi = psrlqi(b, 32);
28320   //
28321   //  AloBlo = pmuludq(a, b);
28322   //  AloBhi = pmuludq(a, Bhi);
28323   //  AhiBlo = pmuludq(Ahi, b);
28324   //
28325   //  Hi = psllqi(AloBhi + AhiBlo, 32);
28326   //  return AloBlo + Hi;
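        //  This is schoolbook multiplication on 32-bit digits:
        //    a * b = (Alo + Ahi * 2^32) * (Blo + Bhi * 2^32)
        //          = Alo*Blo + (Alo*Bhi + Ahi*Blo) * 2^32  (mod 2^64)
        //  The Ahi*Bhi term is shifted out of the low 64 bits and can be
        //  dropped.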
28327   KnownBits AKnown = DAG.computeKnownBits(A);
28328   KnownBits BKnown = DAG.computeKnownBits(B);
28329 
28330   APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
28331   bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
28332   bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
28333 
28334   APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
28335   bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
28336   bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
28337 
28338   SDValue Zero = DAG.getConstant(0, dl, VT);
28339 
28340   // Only multiply lo/hi halves that aren't known to be zero.
28341   SDValue AloBlo = Zero;
28342   if (!ALoIsZero && !BLoIsZero)
28343     AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
28344 
28345   SDValue AloBhi = Zero;
28346   if (!ALoIsZero && !BHiIsZero) {
28347     SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
28348     AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
28349   }
28350 
28351   SDValue AhiBlo = Zero;
28352   if (!AHiIsZero && !BLoIsZero) {
28353     SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
28354     AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
28355   }
28356 
28357   SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
28358   Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
28359 
28360   return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
28361 }
28362 
28363 static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
28364                                      MVT VT, bool IsSigned,
28365                                      const X86Subtarget &Subtarget,
28366                                      SelectionDAG &DAG,
28367                                      SDValue *Low = nullptr) {
28368   unsigned NumElts = VT.getVectorNumElements();
28369 
28370   // For vXi8 we will unpack the low and high half of each 128-bit lane to widen
28371   // to a vXi16 type. Do the multiplies, shift the results and pack the half
28372   // lane results back together.
28373 
28374   // We'll take different approaches for signed and unsigned.
28375   // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes
28376   // and use pmullw to calculate the full 16-bit product.
28377   // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
28378   // shift them left into the upper byte of each word. This allows us to use
28379   // pmulhw to calculate the full 16-bit product. This trick means we don't
28380   // need to sign extend the bytes to use pmullw.
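        // For the signed case, if each word holds a byte shifted into its upper
        // half (x = a << 8, y = b << 8), then pmulhw computes
        // (x * y) >> 16 = (a * b * 2^16) >> 16 = a * b, the full signed 16-bit
        // product of the two bytes.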
28381 
28382   MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28383   SDValue Zero = DAG.getConstant(0, dl, VT);
28384 
28385   SDValue ALo, AHi;
28386   if (IsSigned) {
28387     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
28388     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
28389   } else {
28390     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
28391     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
28392   }
28393 
28394   SDValue BLo, BHi;
28395   if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28396     // If the RHS is a constant, manually unpackl/unpackh and extend.
28397     SmallVector<SDValue, 16> LoOps, HiOps;
28398     for (unsigned i = 0; i != NumElts; i += 16) {
28399       for (unsigned j = 0; j != 8; ++j) {
28400         SDValue LoOp = B.getOperand(i + j);
28401         SDValue HiOp = B.getOperand(i + j + 8);
28402 
28403         if (IsSigned) {
28404           LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
28405           HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
28406           LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
28407                              DAG.getConstant(8, dl, MVT::i16));
28408           HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
28409                              DAG.getConstant(8, dl, MVT::i16));
28410         } else {
28411           LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
28412           HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
28413         }
28414 
28415         LoOps.push_back(LoOp);
28416         HiOps.push_back(HiOp);
28417       }
28418     }
28419 
28420     BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28421     BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28422   } else if (IsSigned) {
28423     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
28424     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
28425   } else {
28426     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
28427     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
28428   }
28429 
28430   // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi
28431   // results and pack back to vXi8.
28432   unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
28433   SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
28434   SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);
28435 
28436   if (Low)
28437     *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28438 
28439   return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
28440 }
28441 
28442 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
28443                          SelectionDAG &DAG) {
28444   SDLoc dl(Op);
28445   MVT VT = Op.getSimpleValueType();
28446   bool IsSigned = Op->getOpcode() == ISD::MULHS;
28447   unsigned NumElts = VT.getVectorNumElements();
28448   SDValue A = Op.getOperand(0);
28449   SDValue B = Op.getOperand(1);
28450 
28451   // Decompose 256-bit ops into 128-bit ops.
28452   if (VT.is256BitVector() && !Subtarget.hasInt256())
28453     return splitVectorIntBinary(Op, DAG);
28454 
28455   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28456     return splitVectorIntBinary(Op, DAG);
28457 
28458   if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
28459     assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
28460            (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
28461            (VT == MVT::v16i32 && Subtarget.hasAVX512()));
28462 
28463     // PMULxD operations multiply each even value (starting at 0) of LHS with
28464     // the related value of RHS and produce a widened result.
28465     // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28466     // => <2 x i64> <ae|cg>
28467     //
28468     // In other words, to have all the results, we need to perform two PMULxD:
28469     // 1. one with the even values.
28470     // 2. one with the odd values.
28471     // To achieve #2, we need to place the odd values at an even position.
28472     //
28473     // Place the odd value at an even position (basically, shift all values 1
28474     // step to the left):
28475     const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
28476                         9, -1, 11, -1, 13, -1, 15, -1};
28477     // <a|b|c|d> => <b|undef|d|undef>
28478     SDValue Odd0 =
28479         DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
28480     // <e|f|g|h> => <f|undef|h|undef>
28481     SDValue Odd1 =
28482         DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
28483 
28484     // Emit two multiplies, one for the lower 2 ints and one for the higher 2
28485     // ints.
28486     MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
28487     unsigned Opcode =
28488         (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
28489     // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28490     // => <2 x i64> <ae|cg>
28491     SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28492                                                   DAG.getBitcast(MulVT, A),
28493                                                   DAG.getBitcast(MulVT, B)));
28494     // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
28495     // => <2 x i64> <bf|dh>
28496     SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28497                                                   DAG.getBitcast(MulVT, Odd0),
28498                                                   DAG.getBitcast(MulVT, Odd1)));
28499 
28500     // Shuffle it back into the right order.
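          // For v4i32 this builds the mask <1, 5, 3, 7>, i.e. take the high
          // i32 of each 64-bit product: <hi(ae), hi(bf), hi(cg), hi(dh)>.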
28501     SmallVector<int, 16> ShufMask(NumElts);
28502     for (int i = 0; i != (int)NumElts; ++i)
28503       ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
28504 
28505     SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
28506 
28507     // If we have a signed multiply but no PMULDQ, fix up the result of an
28508     // unsigned multiply.
28509     if (IsSigned && !Subtarget.hasSSE41()) {
28510       SDValue Zero = DAG.getConstant(0, dl, VT);
28511       SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
28512                                DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
28513       SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
28514                                DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
28515 
28516       SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
28517       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
28518     }
28519 
28520     return Res;
28521   }
28522 
28523   // Only i8 vectors should need custom lowering after this.
28524   assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
28525          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
28526          "Unsupported vector type");
28527 
28528   // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
28529   // logical shift down the upper half and pack back to i8.
28530 
28531   // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
28532   // and then ashr/lshr the upper bits down to the lower bits before multiply.
28533 
28534   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28535       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28536     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28537     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28538     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28539     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28540     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28541     Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28542     return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28543   }
28544 
28545   return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
28546 }
28547 
28548 // Custom lowering for SMULO/UMULO.
28549 static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
28550                          SelectionDAG &DAG) {
28551   MVT VT = Op.getSimpleValueType();
28552 
28553   // Scalars defer to LowerXALUO.
28554   if (!VT.isVector())
28555     return LowerXALUO(Op, DAG);
28556 
28557   SDLoc dl(Op);
28558   bool IsSigned = Op->getOpcode() == ISD::SMULO;
28559   SDValue A = Op.getOperand(0);
28560   SDValue B = Op.getOperand(1);
28561   EVT OvfVT = Op->getValueType(1);
28562 
28563   if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
28564       (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
28565     // Extract the LHS Lo/Hi vectors
28566     SDValue LHSLo, LHSHi;
28567     std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);
28568 
28569     // Extract the RHS Lo/Hi vectors
28570     SDValue RHSLo, RHSHi;
28571     std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);
28572 
28573     EVT LoOvfVT, HiOvfVT;
28574     std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
28575     SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
28576     SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);
28577 
28578     // Issue the split operations.
28579     SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
28580     SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);
28581 
28582     // Join the separate data results and the overflow results.
28583     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28584     SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
28585                               Hi.getValue(1));
28586 
28587     return DAG.getMergeValues({Res, Ovf}, dl);
28588   }
28589 
28590   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28591   EVT SetccVT =
28592       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28593 
28594   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28595       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28596     unsigned NumElts = VT.getVectorNumElements();
28597     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28598     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28599     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28600     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28601     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28602 
28603     SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28604 
28605     SDValue Ovf;
28606     if (IsSigned) {
28607       SDValue High, LowSign;
28608       if (OvfVT.getVectorElementType() == MVT::i1 &&
28609           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28610         // Rather than truncating, try to do the compare on vXi16 or vXi32.
28611         // Shift the high down filling with sign bits.
28612         High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
28613         // Fill all 16 bits with the sign bit from the low.
28614         LowSign =
28615             getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
28616         LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
28617                                              15, DAG);
28618         SetccVT = OvfVT;
28619         if (!Subtarget.hasBWI()) {
28620           // We can't do a vXi16 compare so sign extend to v16i32.
28621           High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
28622           LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
28623         }
28624       } else {
28625         // Otherwise do the compare at vXi8.
28626         High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28627         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28628         LowSign =
28629             DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28630       }
28631 
28632       Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28633     } else {
28634       SDValue High =
28635           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28636       if (OvfVT.getVectorElementType() == MVT::i1 &&
28637           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28638         // Rather than truncating, try to do the compare on vXi16 or vXi32.
28639         SetccVT = OvfVT;
28640         if (!Subtarget.hasBWI()) {
28641           // We can't do a vXi16 compare so sign extend to v16i32.
28642           High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
28643         }
28644       } else {
28645         // Otherwise do the compare at vXi8.
28646         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28647       }
28648 
28649       Ovf =
28650           DAG.getSetCC(dl, SetccVT, High,
28651                        DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
28652     }
28653 
28654     Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28655 
28656     return DAG.getMergeValues({Low, Ovf}, dl);
28657   }
28658 
28659   SDValue Low;
28660   SDValue High =
28661       LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);
28662 
28663   SDValue Ovf;
28664   if (IsSigned) {
28665     // SMULO overflows if the high bits don't match the sign of the low.
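          // e.g. i8 smulo(100, 2): full product 0x00C8, Low = 0xC8, High = 0x00,
          // LowSign = 0xFF != High, so overflow is reported (200 does not fit
          // in i8).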
28666     SDValue LowSign =
28667         DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28668     Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28669   } else {
28670     // UMULO overflows if the high bits are non-zero.
28671     Ovf =
28672         DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
28673   }
28674 
28675   Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28676 
28677   return DAG.getMergeValues({Low, Ovf}, dl);
28678 }
28679 
28680 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
28681   assert(Subtarget.isTargetWin64() && "Unexpected target");
28682   EVT VT = Op.getValueType();
28683   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28684          "Unexpected return type for lowering");
28685 
28686   if (isa<ConstantSDNode>(Op->getOperand(1))) {
28687     SmallVector<SDValue> Result;
28688     if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
28689       return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
28690   }
28691 
28692   RTLIB::Libcall LC;
28693   bool isSigned;
28694   switch (Op->getOpcode()) {
28695   default: llvm_unreachable("Unexpected request for libcall!");
28696   case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
28697   case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
28698   case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
28699   case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
28700   }
28701 
28702   SDLoc dl(Op);
28703   SDValue InChain = DAG.getEntryNode();
28704 
28705   TargetLowering::ArgListTy Args;
28706   TargetLowering::ArgListEntry Entry;
28707   for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
28708     EVT ArgVT = Op->getOperand(i).getValueType();
28709     assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28710            "Unexpected argument type for lowering");
28711     SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28712     int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28713     MachinePointerInfo MPI =
28714         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28715     Entry.Node = StackPtr;
28716     InChain =
28717         DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
28718     Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28719     Entry.Ty = PointerType::get(ArgTy, 0);
28720     Entry.IsSExt = false;
28721     Entry.IsZExt = false;
28722     Args.push_back(Entry);
28723   }
28724 
28725   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
28726                                          getPointerTy(DAG.getDataLayout()));
28727 
28728   TargetLowering::CallLoweringInfo CLI(DAG);
28729   CLI.setDebugLoc(dl)
28730       .setChain(InChain)
28731       .setLibCallee(
28732           getLibcallCallingConv(LC),
28733           static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
28734           std::move(Args))
28735       .setInRegister()
28736       .setSExtResult(isSigned)
28737       .setZExtResult(!isSigned);
28738 
28739   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
28740   return DAG.getBitcast(VT, CallInfo.first);
28741 }
28742 
28743 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
28744                                                    SelectionDAG &DAG,
28745                                                    SDValue &Chain) const {
28746   assert(Subtarget.isTargetWin64() && "Unexpected target");
28747   EVT VT = Op.getValueType();
28748   bool IsStrict = Op->isStrictFPOpcode();
28749 
28750   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28751   EVT ArgVT = Arg.getValueType();
28752 
28753   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28754          "Unexpected return type for lowering");
28755 
28756   RTLIB::Libcall LC;
28757   if (Op->getOpcode() == ISD::FP_TO_SINT ||
28758       Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
28759     LC = RTLIB::getFPTOSINT(ArgVT, VT);
28760   else
28761     LC = RTLIB::getFPTOUINT(ArgVT, VT);
28762   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28763 
28764   SDLoc dl(Op);
28765   MakeLibCallOptions CallOptions;
28766   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28767 
28768   SDValue Result;
28769   // The i128 result is returned as a v2i64 in xmm0; cast it back to the
28770   // expected VT (i128).
28771   std::tie(Result, Chain) =
28772       makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
28773   Result = DAG.getBitcast(VT, Result);
28774   return Result;
28775 }
28776 
28777 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
28778                                                    SelectionDAG &DAG) const {
28779   assert(Subtarget.isTargetWin64() && "Unexpected target");
28780   EVT VT = Op.getValueType();
28781   bool IsStrict = Op->isStrictFPOpcode();
28782 
28783   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28784   EVT ArgVT = Arg.getValueType();
28785 
28786   assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28787          "Unexpected argument type for lowering");
28788 
28789   RTLIB::Libcall LC;
28790   if (Op->getOpcode() == ISD::SINT_TO_FP ||
28791       Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
28792     LC = RTLIB::getSINTTOFP(ArgVT, VT);
28793   else
28794     LC = RTLIB::getUINTTOFP(ArgVT, VT);
28795   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28796 
28797   SDLoc dl(Op);
28798   MakeLibCallOptions CallOptions;
28799   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28800 
28801   // Pass the i128 argument as an indirect argument on the stack.
28802   SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28803   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28804   MachinePointerInfo MPI =
28805       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28806   Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));
28807 
28808   SDValue Result;
28809   std::tie(Result, Chain) =
28810       makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
28811   return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
28812 }
28813 
28814 // Return true if the required (according to Opcode) shift-imm form is natively
28815 // supported by the Subtarget.
28816 static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
28817                                         unsigned Opcode) {
28818   if (!VT.isSimple())
28819     return false;
28820 
28821   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28822     return false;
28823 
28824   if (VT.getScalarSizeInBits() < 16)
28825     return false;
28826 
28827   if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
28828       (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
28829     return true;
28830 
28831   bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
28832                 (VT.is256BitVector() && Subtarget.hasInt256());
28833 
28834   bool AShift = LShift && (Subtarget.hasAVX512() ||
28835                            (VT != MVT::v2i64 && VT != MVT::v4i64));
28836   return (Opcode == ISD::SRA) ? AShift : LShift;
28837 }
28838 
28839 // The shift amount is a variable, but it is the same for all vector lanes.
28840 // These instructions are defined together with shift-immediate.
28841 static
28842 bool supportedVectorShiftWithBaseAmnt(EVT VT, const X86Subtarget &Subtarget,
28843                                       unsigned Opcode) {
28844   return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
28845 }
28846 
28847 // Return true if the required (according to Opcode) variable-shift form is
28848 // natively supported by the Subtarget.
28849 static bool supportedVectorVarShift(EVT VT, const X86Subtarget &Subtarget,
28850                                     unsigned Opcode) {
28851   if (!VT.isSimple())
28852     return false;
28853 
28854   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28855     return false;
28856 
28857   if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
28858     return false;
28859 
28860   // vXi16 supported only on AVX-512, BWI
28861   if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
28862     return false;
28863 
28864   if (Subtarget.hasAVX512() &&
28865       (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
28866     return true;
28867 
28868   bool LShift = VT.is128BitVector() || VT.is256BitVector();
28869   bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
28870   return (Opcode == ISD::SRA) ? AShift : LShift;
28871 }
28872 
28873 static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
28874                                            const X86Subtarget &Subtarget) {
28875   MVT VT = Op.getSimpleValueType();
28876   SDLoc dl(Op);
28877   SDValue R = Op.getOperand(0);
28878   SDValue Amt = Op.getOperand(1);
28879   unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
28880 
28881   auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
28882     assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
28883     MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
28884     SDValue Ex = DAG.getBitcast(ExVT, R);
28885 
28886     // ashr(R, 63) === cmp_slt(R, 0)
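          // (both produce an all-ones lane for negative elements and an
          // all-zeros lane otherwise).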
28887     if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
28888       assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
28889              "Unsupported PCMPGT op");
28890       return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
28891     }
28892 
28893     if (ShiftAmt >= 32) {
28894       // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
28895       SDValue Upper =
28896           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
28897       SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28898                                                  ShiftAmt - 32, DAG);
28899       if (VT == MVT::v2i64)
28900         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
28901       if (VT == MVT::v4i64)
28902         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28903                                   {9, 1, 11, 3, 13, 5, 15, 7});
28904     } else {
28905       // SRA upper i32, SRL whole i64 and select lower i32.
28906       SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28907                                                  ShiftAmt, DAG);
28908       SDValue Lower =
28909           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
28910       Lower = DAG.getBitcast(ExVT, Lower);
28911       if (VT == MVT::v2i64)
28912         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
28913       if (VT == MVT::v4i64)
28914         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28915                                   {8, 1, 10, 3, 12, 5, 14, 7});
28916     }
28917     return DAG.getBitcast(VT, Ex);
28918   };
28919 
28920   // Optimize shl/srl/sra with constant shift amount.
28921   APInt APIntShiftAmt;
28922   if (!X86::isConstantSplat(Amt, APIntShiftAmt))
28923     return SDValue();
28924 
28925   // If the shift amount is out of range, return undef.
28926   if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
28927     return DAG.getUNDEF(VT);
28928 
28929   uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
28930 
28931   if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
28932     // Hardware support for vector shifts is sparse, which makes us scalarize the
28933     // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
28934     // shl: (shl V, 1) -> (add (freeze V), (freeze V))
28935     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28936       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28937       // must be 0). (add undef, undef) however can be any value. To make this
28938       // safe, we must freeze R to ensure that register allocation uses the same
28939       // register for an undefined value. This ensures that the result will
28940       // still be even and preserves the original semantics.
28941       R = DAG.getFreeze(R);
28942       return DAG.getNode(ISD::ADD, dl, VT, R, R);
28943     }
28944 
28945     return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
28946   }
28947 
28948   // i64 SRA needs to be performed as partial shifts.
28949   if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
28950        (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
28951       Op.getOpcode() == ISD::SRA)
28952     return ArithmeticShiftRight64(ShiftAmt);
28953 
28954   if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
28955       (Subtarget.hasBWI() && VT == MVT::v64i8)) {
28956     unsigned NumElts = VT.getVectorNumElements();
28957     MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28958 
28959     // Simple i8 add case
28960     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28961       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28962       // must be 0). (add undef, undef) however can be any value. To make this
28963       // safe, we must freeze R to ensure that register allocation uses the same
28964       // register for an undefined value. This ensures that the result will
28965       // still be even and preserves the original semantics.
28966       R = DAG.getFreeze(R);
28967       return DAG.getNode(ISD::ADD, dl, VT, R, R);
28968     }
28969 
28970     // ashr(R, 7)  === cmp_slt(R, 0)
28971     if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
28972       SDValue Zeros = DAG.getConstant(0, dl, VT);
28973       if (VT.is512BitVector()) {
28974         assert(VT == MVT::v64i8 && "Unexpected element type!");
28975         SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
28976         return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
28977       }
28978       return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
28979     }
28980 
28981     // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
28982     if (VT == MVT::v16i8 && Subtarget.hasXOP())
28983       return SDValue();
28984 
28985     if (Op.getOpcode() == ISD::SHL) {
28986       // Make a large shift.
28987       SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
28988                                                ShiftAmt, DAG);
28989       SHL = DAG.getBitcast(VT, SHL);
28990       // Zero out the rightmost bits.
28991       APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
28992       return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
28993     }
28994     if (Op.getOpcode() == ISD::SRL) {
28995       // Make a large shift.
28996       SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
28997                                                ShiftAmt, DAG);
28998       SRL = DAG.getBitcast(VT, SRL);
28999       // Zero out the leftmost bits.
29000       APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
29001       return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
29002     }
29003     if (Op.getOpcode() == ISD::SRA) {
29004       // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
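            // e.g. for i8 ashr(0xF0, 2): lshr gives 0x3C, Mask = 128 >> 2 = 0x20,
            // xor gives 0x1C, sub gives 0xFC, i.e. -16 >> 2 = -4 as expected.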
29005       SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29006 
29007       SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
29008       Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
29009       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
29010       return Res;
29011     }
29012     llvm_unreachable("Unknown shift opcode.");
29013   }
29014 
29015   return SDValue();
29016 }
29017 
29018 static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
29019                                           const X86Subtarget &Subtarget) {
29020   MVT VT = Op.getSimpleValueType();
29021   SDLoc dl(Op);
29022   SDValue R = Op.getOperand(0);
29023   SDValue Amt = Op.getOperand(1);
29024   unsigned Opcode = Op.getOpcode();
29025   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
29026 
29027   int BaseShAmtIdx = -1;
29028   if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
29029     if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
29030       return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
29031                                  Subtarget, DAG);
29032 
29033     // vXi8 shifts - shift as v8i16 + mask result.
29034     if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
29035          (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
29036          VT == MVT::v64i8) &&
29037         !Subtarget.hasXOP()) {
29038       unsigned NumElts = VT.getVectorNumElements();
29039       MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29040       if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
29041         unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
29042         unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
29043 
29044         // Create the mask using vXi16 shifts. For shift-rights we need to move
29045         // the upper byte down before splatting the vXi8 mask.
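              // e.g. for a left shift by 3 the vXi16 mask is 0xFFF8 and its low
              // byte 0xF8 is splatted as the vXi8 mask; for right shifts the
              // extra shift by 8 first moves the meaningful byte (e.g. 0x1F for
              // a logical right shift by 3) into the low byte.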
29046         SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
29047         BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
29048                                       BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
29049         if (Opcode != ISD::SHL)
29050           BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
29051                                                8, DAG);
29052         BitMask = DAG.getBitcast(VT, BitMask);
29053         BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
29054                                        SmallVector<int, 64>(NumElts, 0));
29055 
29056         SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
29057                                           DAG.getBitcast(ExtVT, R), BaseShAmt,
29058                                           BaseShAmtIdx, Subtarget, DAG);
29059         Res = DAG.getBitcast(VT, Res);
29060         Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
29061 
29062         if (Opcode == ISD::SRA) {
29063           // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
29064           // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
29065           SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
29066           SignMask =
29067               getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
29068                                   BaseShAmtIdx, Subtarget, DAG);
29069           SignMask = DAG.getBitcast(VT, SignMask);
29070           Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
29071           Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
29072         }
29073         return Res;
29074       }
29075     }
29076   }
29077 
29078   return SDValue();
29079 }
29080 
29081 // Convert a shift/rotate left amount to a multiplication scale factor.
29082 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
29083                                        const X86Subtarget &Subtarget,
29084                                        SelectionDAG &DAG) {
29085   MVT VT = Amt.getSimpleValueType();
29086   if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
29087         (Subtarget.hasInt256() && VT == MVT::v16i16) ||
29088         (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
29089         (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
29090         (Subtarget.hasInt256() && VT == MVT::v32i8) ||
29091         (Subtarget.hasBWI() && VT == MVT::v64i8)))
29092     return SDValue();
29093 
29094   MVT SVT = VT.getVectorElementType();
29095   unsigned SVTBits = SVT.getSizeInBits();
29096   unsigned NumElems = VT.getVectorNumElements();
29097 
29098   APInt UndefElts;
29099   SmallVector<APInt> EltBits;
29100   if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
29101     APInt One(SVTBits, 1);
29102     SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
29103     for (unsigned I = 0; I != NumElems; ++I) {
29104       if (UndefElts[I] || EltBits[I].uge(SVTBits))
29105         continue;
29106       uint64_t ShAmt = EltBits[I].getZExtValue();
29107       Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
29108     }
29109     return DAG.getBuildVector(VT, dl, Elts);
29110   }
29111 
29112   // If the target doesn't support variable shifts, use either FP conversion
29113   // or integer multiplication to avoid shifting each element individually.
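        // For v4i32 the shift amount is added directly into the exponent field
        // of a float: 0x3f800000 is 1.0f, so adding (Amt << 23) produces the
        // float 2^Amt, and FP_TO_SINT turns that into the scale 1 << Amt
        // (e.g. Amt = 3 gives 0x41000000 = 8.0f -> 8).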
29114   if (VT == MVT::v4i32) {
29115     Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
29116     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
29117                       DAG.getConstant(0x3f800000U, dl, VT));
29118     Amt = DAG.getBitcast(MVT::v4f32, Amt);
29119     return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
29120   }
29121 
29122   // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
29123   if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
29124     SDValue Z = DAG.getConstant(0, dl, VT);
29125     SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
29126     SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
29127     Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
29128     Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
29129     if (Subtarget.hasSSE41())
29130       return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29131     return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
29132   }
29133 
29134   return SDValue();
29135 }
29136 
29137 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
29138                           SelectionDAG &DAG) {
29139   MVT VT = Op.getSimpleValueType();
29140   SDLoc dl(Op);
29141   SDValue R = Op.getOperand(0);
29142   SDValue Amt = Op.getOperand(1);
29143   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29144   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29145 
29146   unsigned Opc = Op.getOpcode();
29147   unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
29148   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
29149 
29150   assert(VT.isVector() && "Custom lowering only for vector shifts!");
29151   assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
29152 
29153   if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
29154     return V;
29155 
29156   if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
29157     return V;
29158 
29159   if (supportedVectorVarShift(VT, Subtarget, Opc))
29160     return Op;
29161 
29162   // i64 vector arithmetic shift can be emulated with the transform:
29163   // M = lshr(SIGN_MASK, Amt)
29164   // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
29165   if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
29166        (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
29167       Opc == ISD::SRA) {
29168     SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
29169     SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
29170     R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29171     R = DAG.getNode(ISD::XOR, dl, VT, R, M);
29172     R = DAG.getNode(ISD::SUB, dl, VT, R, M);
29173     return R;
29174   }
29175 
29176   // XOP has 128-bit variable logical/arithmetic shifts.
29177   // +ve/-ve Amt = shift left/right.
29178   if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
29179                              VT == MVT::v8i16 || VT == MVT::v16i8)) {
29180     if (Opc == ISD::SRL || Opc == ISD::SRA) {
29181       SDValue Zero = DAG.getConstant(0, dl, VT);
29182       Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
29183     }
29184     if (Opc == ISD::SHL || Opc == ISD::SRL)
29185       return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
29186     if (Opc == ISD::SRA)
29187       return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
29188   }
29189 
29190   // v2i64 vector logical shifts can efficiently avoid scalarization - do the
29191   // shifts per-lane and then shuffle the partial results back together.
29192   if (VT == MVT::v2i64 && Opc != ISD::SRA) {
29193     // Splat the shift amounts so the scalar shifts above will catch it.
29194     SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
29195     SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
29196     SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
29197     SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
29198     return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
29199   }
29200 
29201   // If possible, lower this shift as a sequence of two shifts by
29202   // constant plus a BLENDing shuffle instead of scalarizing it.
29203   // Example:
29204   //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
29205   //
29206   // Could be rewritten as:
29207   //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
29208   //
29209   // The advantage is that the two shifts from the example would be
29210   // lowered as X86ISD::VSRLI nodes in parallel before blending.
29211   if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
29212                       (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29213     SDValue Amt1, Amt2;
29214     unsigned NumElts = VT.getVectorNumElements();
29215     SmallVector<int, 8> ShuffleMask;
29216     for (unsigned i = 0; i != NumElts; ++i) {
29217       SDValue A = Amt->getOperand(i);
29218       if (A.isUndef()) {
29219         ShuffleMask.push_back(SM_SentinelUndef);
29220         continue;
29221       }
29222       if (!Amt1 || Amt1 == A) {
29223         ShuffleMask.push_back(i);
29224         Amt1 = A;
29225         continue;
29226       }
29227       if (!Amt2 || Amt2 == A) {
29228         ShuffleMask.push_back(i + NumElts);
29229         Amt2 = A;
29230         continue;
29231       }
29232       break;
29233     }
29234 
29235     // Only perform this blend if we can perform it without loading a mask.
29236     if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
29237         (VT != MVT::v16i16 ||
29238          is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
29239         (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
29240          canWidenShuffleElements(ShuffleMask))) {
29241       auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
29242       auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
29243       if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
29244           Cst2->getAPIntValue().ult(EltSizeInBits)) {
29245         SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29246                                                     Cst1->getZExtValue(), DAG);
29247         SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29248                                                     Cst2->getZExtValue(), DAG);
29249         return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
29250       }
29251     }
29252   }
29253 
29254   // If possible, lower this packed shift into a vector multiply instead of
29255   // expanding it into a sequence of scalar shifts.
29256   // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
29257   if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
29258                                                 Subtarget.canExtendTo512BW())))
29259     if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
29260       return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
29261 
29262   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
29263   // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
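        // mulhu(x, 1 << (16 - Amt)) == (x * 2^(16-Amt)) >> 16 == x >> Amt for Amt
        // in [1, 15]; the select below handles Amt == 0, where the scale 1 << 16
        // does not fit in an i16 element.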
29264   if (Opc == ISD::SRL && ConstantAmt &&
29265       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29266     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29267     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29268     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29269       SDValue Zero = DAG.getConstant(0, dl, VT);
29270       SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
29271       SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
29272       return DAG.getSelect(dl, VT, ZAmt, R, Res);
29273     }
29274   }
29275 
29276   // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
29277   // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
29278   // TODO: Special case handling for shift by 0/1, really we can afford either
29279   // of these cases in pre-SSE41/XOP/AVX512 but not both.
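        // mulhs(x, 1 << (16 - Amt)) == ashr(x, Amt) for Amt in [2, 15]. Amt == 1
        // would need the scale 0x8000, which is negative as an i16, so it is
        // handled by the explicit VSRAI-by-1 select below, and Amt == 0 selects
        // the original value.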
29280   if (Opc == ISD::SRA && ConstantAmt &&
29281       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
29282       ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
29283         !Subtarget.hasAVX512()) ||
29284        DAG.isKnownNeverZero(Amt))) {
29285     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29286     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29287     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29288       SDValue Amt0 =
29289           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
29290       SDValue Amt1 =
29291           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
29292       SDValue Sra1 =
29293           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
29294       SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
29295       Res = DAG.getSelect(dl, VT, Amt0, R, Res);
29296       return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
29297     }
29298   }
29299 
29300   // v4i32 Non Uniform Shifts.
29301   // If the shift amount is constant we can shift each lane using the SSE2
29302   // immediate shifts, else we need to zero-extend each lane to the lower i64
29303   // and shift using the SSE2 variable shifts.
29304   // The separate results can then be blended together.
29305   if (VT == MVT::v4i32) {
29306     SDValue Amt0, Amt1, Amt2, Amt3;
29307     if (ConstantAmt) {
29308       Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
29309       Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
29310       Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
29311       Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
29312     } else {
29313       // The SSE2 shifts use the lower i64 as the same shift amount for
29314       // all lanes and the upper i64 is ignored. On AVX we're better off
29315       // just zero-extending, but for SSE just duplicating the top 16-bits is
29316       // cheaper and has the same effect for out of range values.
29317       if (Subtarget.hasAVX()) {
29318         SDValue Z = DAG.getConstant(0, dl, VT);
29319         Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
29320         Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
29321         Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
29322         Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
29323       } else {
29324         SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
29325         SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
29326                                              {4, 5, 6, 7, -1, -1, -1, -1});
29327         SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
29328         SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
29329         Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
29330         Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
29331         Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
29332         Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
29333       }
29334     }
29335 
29336     unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
29337     SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
29338     SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
29339     SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
29340     SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
29341 
29342     // Merge the shifted lane results optimally with/without PBLENDW.
29343     // TODO - ideally shuffle combining would handle this.
29344     if (Subtarget.hasSSE41()) {
29345       SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
29346       SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
29347       return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
29348     }
29349     SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
29350     SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
29351     return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
29352   }
29353 
29354   // It's worth extending once and using the vXi16/vXi32 shifts for smaller
29355   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
29356   // make the existing SSE solution better.
29357   // NOTE: We honor preferred vector width before promoting to 512-bits.
29358   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
29359       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
29360       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
29361       (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
29362       (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
29363     assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
29364            "Unexpected vector type");
29365     MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
29366     MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
29367     unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29368     R = DAG.getNode(ExtOpc, dl, ExtVT, R);
29369     Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
29370     return DAG.getNode(ISD::TRUNCATE, dl, VT,
29371                        DAG.getNode(Opc, dl, ExtVT, R, Amt));
29372   }
29373 
29374   // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
29375   // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
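        // Each byte x ends up sign/zero-extended in an i16 lane and multiplied by
        // 2^(8 - Amt); the high byte of that 16-bit product is x >> Amt, which
        // the trailing logical shift right by 8 (plus PACKUS in the split path)
        // extracts.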
29376   if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
29377       (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
29378        (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
29379       !Subtarget.hasXOP()) {
29380     int NumElts = VT.getVectorNumElements();
29381     SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
29382 
29383     // Extend constant shift amount to vXi16 (it doesn't matter if the type
29384     // isn't legal).
29385     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29386     Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
29387     Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
29388     Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
29389     assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
29390            "Constant build vector expected");
29391 
29392     if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
29393       bool IsSigned = Opc == ISD::SRA;
29394       R = DAG.getExtOrTrunc(IsSigned, R, dl, ExVT);
29395       R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
29396       R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
29397       return DAG.getZExtOrTrunc(R, dl, VT);
29398     }
29399 
29400     SmallVector<SDValue, 16> LoAmt, HiAmt;
29401     for (int i = 0; i != NumElts; i += 16) {
29402       for (int j = 0; j != 8; ++j) {
29403         LoAmt.push_back(Amt.getOperand(i + j));
29404         HiAmt.push_back(Amt.getOperand(i + j + 8));
29405       }
29406     }
29407 
29408     MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
29409     SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
29410     SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
29411 
29412     SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
29413     SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
29414     LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
29415     HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
29416     LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
29417     HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
29418     LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
29419     HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
29420     return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
29421   }
29422 
29423   if (VT == MVT::v16i8 ||
29424       (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
29425       (VT == MVT::v64i8 && Subtarget.hasBWI())) {
29426     MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
29427 
29428     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
29429       if (VT.is512BitVector()) {
29430         // On AVX512BW targets we make use of the fact that VSELECT lowers
29431         // to a masked blend which selects bytes based just on the sign bit
29432         // extracted to a mask.
29433         MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
29434         V0 = DAG.getBitcast(VT, V0);
29435         V1 = DAG.getBitcast(VT, V1);
29436         Sel = DAG.getBitcast(VT, Sel);
29437         Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
29438                            ISD::SETGT);
29439         return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
29440       } else if (Subtarget.hasSSE41()) {
29441         // On SSE41 targets we can use PBLENDVB which selects bytes based just
29442         // on the sign bit.
29443         V0 = DAG.getBitcast(VT, V0);
29444         V1 = DAG.getBitcast(VT, V1);
29445         Sel = DAG.getBitcast(VT, Sel);
29446         return DAG.getBitcast(SelVT,
29447                               DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
29448       }
29449       // On pre-SSE41 targets we test for the sign bit by comparing to
29450       // zero - a negative value will set all bits of the lanes to true
29451       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
29452       SDValue Z = DAG.getConstant(0, dl, SelVT);
29453       SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
29454       return DAG.getSelect(dl, SelVT, C, V0, V1);
29455     };
29456 
29457     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
29458     // We can safely do this using i16 shifts as we're only interested in
29459     // the 3 lower bits of each byte.
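          // After the << 5, bit 2 of each shift amount sits in its byte's sign
          // bit. The select stages below apply shift-by-4, shift-by-2 and
          // shift-by-1 conditionally on amount bits 2, 1 and 0, with 'a += a'
          // moving the next amount bit into the sign position each time.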
29460     Amt = DAG.getBitcast(ExtVT, Amt);
29461     Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
29462     Amt = DAG.getBitcast(VT, Amt);
29463 
29464     if (Opc == ISD::SHL || Opc == ISD::SRL) {
29465       // r = VSELECT(r, shift(r, 4), a);
29466       SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
29467       R = SignBitSelect(VT, Amt, M, R);
29468 
29469       // a += a
29470       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29471 
29472       // r = VSELECT(r, shift(r, 2), a);
29473       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
29474       R = SignBitSelect(VT, Amt, M, R);
29475 
29476       // a += a
29477       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29478 
29479       // return VSELECT(r, shift(r, 1), a);
29480       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
29481       R = SignBitSelect(VT, Amt, M, R);
29482       return R;
29483     }
29484 
29485     if (Opc == ISD::SRA) {
29486       // For SRA we need to unpack each byte to the higher byte of an i16 vector
29487       // so we can correctly sign extend. We don't care what happens to the
29488       // lower byte.
29489       SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29490       SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29491       SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
29492       SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
29493       ALo = DAG.getBitcast(ExtVT, ALo);
29494       AHi = DAG.getBitcast(ExtVT, AHi);
29495       RLo = DAG.getBitcast(ExtVT, RLo);
29496       RHi = DAG.getBitcast(ExtVT, RHi);
29497 
29498       // r = VSELECT(r, shift(r, 4), a);
29499       SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
29500       SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
29501       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29502       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29503 
29504       // a += a
29505       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29506       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29507 
29508       // r = VSELECT(r, shift(r, 2), a);
29509       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
29510       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
29511       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29512       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29513 
29514       // a += a
29515       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29516       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29517 
29518       // r = VSELECT(r, shift(r, 1), a);
29519       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
29520       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
29521       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29522       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29523 
29524       // Logical shift the result back to the lower byte, leaving a zero upper
29525       // byte meaning that we can safely pack with PACKUSWB.
29526       RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
29527       RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
29528       return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
29529     }
29530   }
29531 
29532   if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
29533     MVT ExtVT = MVT::v8i32;
29534     SDValue Z = DAG.getConstant(0, dl, VT);
29535     SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
29536     SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
29537     SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
29538     SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
29539     ALo = DAG.getBitcast(ExtVT, ALo);
29540     AHi = DAG.getBitcast(ExtVT, AHi);
29541     RLo = DAG.getBitcast(ExtVT, RLo);
29542     RHi = DAG.getBitcast(ExtVT, RHi);
29543     SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
29544     SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
29545     Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
29546     Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
29547     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29548   }
29549 
29550   if (VT == MVT::v8i16) {
29551     // If we have a constant shift amount, the non-SSE41 path is best as
29552     // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
29553     bool UseSSE41 = Subtarget.hasSSE41() &&
29554                     !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29555 
29556     auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
29557       // On SSE41 targets we can use PBLENDVB which selects bytes based just on
29558       // the sign bit.
29559       if (UseSSE41) {
29560         MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
29561         V0 = DAG.getBitcast(ExtVT, V0);
29562         V1 = DAG.getBitcast(ExtVT, V1);
29563         Sel = DAG.getBitcast(ExtVT, Sel);
29564         return DAG.getBitcast(
29565             VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
29566       }
29567       // On pre-SSE41 targets we splat the sign bit - a negative value will
29568       // set all bits of the lanes to true and VSELECT uses that in
29569       // its OR(AND(V0,C),AND(V1,~C)) lowering.
29570       SDValue C =
29571           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
29572       return DAG.getSelect(dl, VT, C, V0, V1);
29573     };
29574 
29575     // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
29576     if (UseSSE41) {
29577       // On SSE41 targets we need to replicate the shift mask in both
29578       // bytes for PBLENDVB.
29579       Amt = DAG.getNode(
29580           ISD::OR, dl, VT,
29581           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
29582           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
29583     } else {
29584       Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
29585     }
29586 
29587     // r = VSELECT(r, shift(r, 8), a);
29588     SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
29589     R = SignBitSelect(Amt, M, R);
29590 
29591     // a += a
29592     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29593 
29594     // r = VSELECT(r, shift(r, 4), a);
29595     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
29596     R = SignBitSelect(Amt, M, R);
29597 
29598     // a += a
29599     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29600 
29601     // r = VSELECT(r, shift(r, 2), a);
29602     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
29603     R = SignBitSelect(Amt, M, R);
29604 
29605     // a += a
29606     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29607 
29608     // return VSELECT(r, shift(r, 1), a);
29609     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
29610     R = SignBitSelect(Amt, M, R);
29611     return R;
29612   }
29613 
29614   // Decompose 256-bit shifts into 128-bit shifts.
29615   if (VT.is256BitVector())
29616     return splitVectorIntBinary(Op, DAG);
29617 
29618   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29619     return splitVectorIntBinary(Op, DAG);
29620 
29621   return SDValue();
29622 }
29623 
29624 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
29625                                 SelectionDAG &DAG) {
29626   MVT VT = Op.getSimpleValueType();
29627   assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
29628          "Unexpected funnel shift opcode!");
29629 
29630   SDLoc DL(Op);
29631   SDValue Op0 = Op.getOperand(0);
29632   SDValue Op1 = Op.getOperand(1);
29633   SDValue Amt = Op.getOperand(2);
29634   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29635   bool IsFSHR = Op.getOpcode() == ISD::FSHR;
29636 
29637   if (VT.isVector()) {
29638     APInt APIntShiftAmt;
29639     bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
29640 
29641     if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
29642       if (IsFSHR)
29643         std::swap(Op0, Op1);
29644 
29645       if (IsCstSplat) {
29646         uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29647         SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
29648         return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
29649                              {Op0, Op1, Imm}, DAG, Subtarget);
29650       }
29651       return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
29652                            {Op0, Op1, Amt}, DAG, Subtarget);
29653     }
29654     assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
29655             VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
29656             VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
29657            "Unexpected funnel shift type!");
29658 
29659     // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
29660     // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
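          // unpack(y,x) forms 2*bw-bit lanes with x in the high half and y in the
          // low half, so a single wide shift yields (x << z) | (y >> (bw - z)) for
          // FSHL (after the final >> bw) and (y >> z) | (x << (bw - z)) for FSHR.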
29661     if (IsCstSplat) {
29662       // TODO: Can't use generic expansion as UNDEF amt elements can be
29663       // converted to other values when folded to shift amounts, losing the
29664       // splat.
29665       uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29666       uint64_t ShXAmt = IsFSHR ? (EltSizeInBits - ShiftAmt) : ShiftAmt;
29667       uint64_t ShYAmt = IsFSHR ? ShiftAmt : (EltSizeInBits - ShiftAmt);
29668       SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, Op0,
29669                                 DAG.getShiftAmountConstant(ShXAmt, VT, DL));
29670       SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Op1,
29671                                 DAG.getShiftAmountConstant(ShYAmt, VT, DL));
29672       return DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
29673     }
29674 
29675     SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29676     SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29677     bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
29678 
29679     // Constant vXi16 funnel shifts can be efficiently handled by default.
29680     if (IsCst && EltSizeInBits == 16)
29681       return SDValue();
29682 
29683     unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
29684     unsigned NumElts = VT.getVectorNumElements();
29685     MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29686     MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29687 
29688     // Split 256-bit integers on XOP/pre-AVX2 targets.
29689     // Split 512-bit integers on non 512-bit BWI targets.
29690     if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
29691                                  !Subtarget.hasAVX2())) ||
29692         (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
29693          EltSizeInBits < 32)) {
29694       // Pre-mask the amount modulo using the wider vector.
29695       Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
29696       return splitVectorOp(Op, DAG);
29697     }
29698 
29699     // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
29700     if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
29701       int ScalarAmtIdx = -1;
29702       if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
29703         // Uniform vXi16 funnel shifts can be efficiently handled by default.
29704         if (EltSizeInBits == 16)
29705           return SDValue();
29706 
29707         SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29708         SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29709         Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
29710                                  ScalarAmtIdx, Subtarget, DAG);
29711         Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
29712                                  ScalarAmtIdx, Subtarget, DAG);
29713         return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29714       }
29715     }
29716 
29717     MVT WideSVT = MVT::getIntegerVT(
29718         std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
29719     MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
29720 
29721     // If per-element shifts are legal, fallback to generic expansion.
29722     if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
29723       return SDValue();
29724 
29725     // Attempt to fold as:
29726     // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29727     // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
29728     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29729         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29730       Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
29731       Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
29732       AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29733       Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
29734                                        EltSizeInBits, DAG);
29735       SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
29736       Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
29737       if (!IsFSHR)
29738         Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
29739                                          EltSizeInBits, DAG);
29740       return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
29741     }
29742 
29743     // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
29744     if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
29745         supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
29746       SDValue Z = DAG.getConstant(0, DL, VT);
29747       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29748       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29749       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29750       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29751       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29752       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29753       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29754     }
29755 
29756     // Fallback to generic expansion.
29757     return SDValue();
29758   }
29759   assert(
29760       (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
29761       "Unexpected funnel shift type!");
29762 
29763   // Expand slow SHLD/SHRD cases if we are not optimizing for size.
29764   bool OptForSize = DAG.shouldOptForSize();
29765   bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
29766 
29767   // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29768   // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
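        // i.e. concatenate x:y in a wider (i32) register, perform one plain shift
        // by the masked amount and take the high (FSHL) or low (FSHR) half. For
        // i8 fshl: Res = (x << 8) | y, and ((Res << (z & 7)) >> 8) truncated to
        // i8 equals (x << z) | (y >> (8 - z)).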
29769   if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
29770       !isa<ConstantSDNode>(Amt)) {
29771     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
29772     SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
29773     Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
29774     Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
29775     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
29776     SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
29777     Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
29778     if (IsFSHR) {
29779       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
29780     } else {
29781       Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
29782       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
29783     }
29784     return DAG.getZExtOrTrunc(Res, DL, VT);
29785   }
29786 
29787   if (VT == MVT::i8 || ExpandFunnel)
29788     return SDValue();
29789 
29790   // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
29791   if (VT == MVT::i16) {
29792     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
29793                       DAG.getConstant(15, DL, Amt.getValueType()));
29794     unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
29795     return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
29796   }
29797 
29798   return Op;
29799 }
29800 
29801 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
29802                            SelectionDAG &DAG) {
29803   MVT VT = Op.getSimpleValueType();
29804   assert(VT.isVector() && "Custom lowering only for vector rotates!");
29805 
29806   SDLoc DL(Op);
29807   SDValue R = Op.getOperand(0);
29808   SDValue Amt = Op.getOperand(1);
29809   unsigned Opcode = Op.getOpcode();
29810   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29811   int NumElts = VT.getVectorNumElements();
29812   bool IsROTL = Opcode == ISD::ROTL;
29813 
29814   // Check for constant splat rotation amount.
29815   APInt CstSplatValue;
29816   bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
29817 
29818   // Check for splat rotate by zero.
29819   if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
29820     return R;
29821 
29822   // AVX512 implicitly uses modulo rotation amounts.
29823   if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
29824     // Attempt to rotate by immediate.
29825     if (IsCstSplat) {
29826       unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
29827       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29828       return DAG.getNode(RotOpc, DL, VT, R,
29829                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29830     }
29831 
29832     // Else, fall-back on VPROLV/VPRORV.
29833     return Op;
29834   }
29835 
29836   // AVX512 VBMI2 vXi16 - lower to funnel shifts.
29837   if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
29838     unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29839     return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29840   }
29841 
29842   SDValue Z = DAG.getConstant(0, DL, VT);
29843 
29844   if (!IsROTL) {
29845     // If the ISD::ROTR amount is constant, we're always better off converting
29846     // to ISD::ROTL.
29847     if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
29848       return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
29849 
29850     // XOP targets always prefer ISD::ROTL.
29851     if (Subtarget.hasXOP())
29852       return DAG.getNode(ISD::ROTL, DL, VT, R,
29853                          DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
29854   }
29855 
29856   // Split 256-bit integers on XOP/pre-AVX2 targets.
29857   if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
29858     return splitVectorIntBinary(Op, DAG);
29859 
29860   // XOP has 128-bit vector variable + immediate rotates.
29861   // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
29862   // XOP implicitly uses modulo rotation amounts.
29863   if (Subtarget.hasXOP()) {
29864     assert(IsROTL && "Only ROTL expected");
29865     assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
29866 
29867     // Attempt to rotate by immediate.
29868     if (IsCstSplat) {
29869       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29870       return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
29871                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29872     }
29873 
29874     // Use general rotate by variable (per-element).
29875     return Op;
29876   }
29877 
29878   // Rotate by a uniform constant - expand back to shifts.
29879   // TODO: Can't use generic expansion as UNDEF amt elements can be converted
29880   // to other values when folded to shift amounts, losing the splat.
29881   if (IsCstSplat) {
29882     uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29883     uint64_t ShlAmt = IsROTL ? RotAmt : (EltSizeInBits - RotAmt);
29884     uint64_t SrlAmt = IsROTL ? (EltSizeInBits - RotAmt) : RotAmt;
29885     SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, R,
29886                               DAG.getShiftAmountConstant(ShlAmt, VT, DL));
29887     SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, R,
29888                               DAG.getShiftAmountConstant(SrlAmt, VT, DL));
29889     return DAG.getNode(ISD::OR, DL, VT, Shl, Srl);
29890   }
29891 
29892   // Split 512-bit integers on non 512-bit BWI targets.
29893   if (VT.is512BitVector() && !Subtarget.useBWIRegs())
29894     return splitVectorIntBinary(Op, DAG);
29895 
29896   assert(
29897       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
29898        ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
29899         Subtarget.hasAVX2()) ||
29900        ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
29901       "Only vXi32/vXi16/vXi8 vector rotates supported");
29902 
29903   MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29904   MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29905 
29906   SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29907   SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29908 
29909   // Attempt to fold as unpack(x,x) << zext(splat(y)):
29910   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29911   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29912   if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
29913     int BaseRotAmtIdx = -1;
29914     if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
29915       if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
29916         unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29917         return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29918       }
29919       unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
29920       SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29921       SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29922       Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
29923                                BaseRotAmtIdx, Subtarget, DAG);
29924       Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
29925                                BaseRotAmtIdx, Subtarget, DAG);
29926       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29927     }
29928   }
29929 
29930   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29931   unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
29932 
29933   // Attempt to fold as unpack(x,x) << zext(y):
29934   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29935   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29936   // Const vXi16/vXi32 are excluded in favor of MUL-based lowering.
29937   if (!(ConstantAmt && EltSizeInBits != 8) &&
29938       !supportedVectorVarShift(VT, Subtarget, ShiftOpc) &&
29939       (ConstantAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc))) {
29940     SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29941     SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29942     SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29943     SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29944     SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29945     SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29946     return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29947   }
29948 
29949   // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
29950   // the amount bit.
29951   // TODO: We're doing nothing here that we couldn't do for funnel shifts.
29952   if (EltSizeInBits == 8) {
29953     MVT WideVT =
29954         MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
29955 
29956     // Attempt to fold as:
29957     // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
29958     // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
29959     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29960         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29961       // If we're rotating by constant, just use default promotion.
29962       if (ConstantAmt)
29963         return SDValue();
29964       // See if we can perform this by widening to vXi16 or vXi32.
29965       R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
29966       R = DAG.getNode(
29967           ISD::OR, DL, WideVT, R,
29968           getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
29969       Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29970       R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
29971       if (IsROTL)
29972         R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
29973       return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
29974     }
29975 
29976     // We don't need ModuloAmt here as we just peek at individual bits.
29977     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
29978       if (Subtarget.hasSSE41()) {
29979         // On SSE41 targets we can use PBLENDVB which selects bytes based just
29980         // on the sign bit.
29981         V0 = DAG.getBitcast(VT, V0);
29982         V1 = DAG.getBitcast(VT, V1);
29983         Sel = DAG.getBitcast(VT, Sel);
29984         return DAG.getBitcast(SelVT,
29985                               DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
29986       }
29987       // On pre-SSE41 targets we test for the sign bit by comparing to
29988       // zero - a negative value will set all bits of the lanes to true
29989       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
29990       SDValue Z = DAG.getConstant(0, DL, SelVT);
29991       SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
29992       return DAG.getSelect(DL, SelVT, C, V0, V1);
29993     };
29994 
29995     // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
29996     if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
29997       Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
29998       IsROTL = true;
29999     }
30000 
30001     unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
30002     unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
30003 
30004     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30005     // We can safely do this using i16 shifts as we're only interested in
30006     // the 3 lower bits of each byte.
30007     Amt = DAG.getBitcast(ExtVT, Amt);
30008     Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
30009     Amt = DAG.getBitcast(VT, Amt);
30010 
30011     // r = VSELECT(r, rot(r, 4), a);
30012     SDValue M;
30013     M = DAG.getNode(
30014         ISD::OR, DL, VT,
30015         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
30016         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
30017     R = SignBitSelect(VT, Amt, M, R);
30018 
30019     // a += a
30020     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30021 
30022     // r = VSELECT(r, rot(r, 2), a);
30023     M = DAG.getNode(
30024         ISD::OR, DL, VT,
30025         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
30026         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
30027     R = SignBitSelect(VT, Amt, M, R);
30028 
30029     // a += a
30030     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30031 
30032     // return VSELECT(r, rot(r, 1), a);
30033     M = DAG.getNode(
30034         ISD::OR, DL, VT,
30035         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
30036         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
30037     return SignBitSelect(VT, Amt, M, R);
30038   }
30039 
30040   bool IsSplatAmt = DAG.isSplatValue(Amt);
30041   bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
30042                         supportedVectorVarShift(VT, Subtarget, ISD::SRL);
30043 
30044   // Fallback for splats + all supported variable shifts.
30045   // Fallback for non-constant AVX2 vXi16 as well.
30046   if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
30047     Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30048     SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
30049     AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
30050     SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
30051     SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
30052     return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
30053   }
30054 
30055   // Everything below assumes ISD::ROTL.
30056   if (!IsROTL) {
30057     Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30058     IsROTL = true;
30059   }
30060 
30061   // ISD::ROT* uses modulo rotate amounts.
30062   Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30063 
30064   assert(IsROTL && "Only ROTL supported");
30065 
30066   // As with shifts, attempt to convert the rotation amount to a multiplication
30067   // factor; otherwise fall back to general expansion.
30068   SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
30069   if (!Scale)
30070     return SDValue();
30071 
30072   // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
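        // The 32-bit product x * 2^c contains the rotation: its low 16 bits are
        // x << c and its high 16 bits are x >> (16 - c), so ORing MUL and MULHU
        // reassembles rotl(x, c).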
30073   if (EltSizeInBits == 16) {
30074     SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
30075     SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
30076     return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
30077   }
30078 
30079   // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
30080   // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
30081   // that can then be OR'd with the lower 32-bits.
30082   assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
30083   static const int OddMask[] = {1, -1, 3, -1};
30084   SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
30085   SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
30086 
30087   SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30088                               DAG.getBitcast(MVT::v2i64, R),
30089                               DAG.getBitcast(MVT::v2i64, Scale));
30090   SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30091                               DAG.getBitcast(MVT::v2i64, R13),
30092                               DAG.getBitcast(MVT::v2i64, Scale13));
30093   Res02 = DAG.getBitcast(VT, Res02);
30094   Res13 = DAG.getBitcast(VT, Res13);
30095 
30096   return DAG.getNode(ISD::OR, DL, VT,
30097                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
30098                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
30099 }
30100 
30101 /// Returns true if the operand type is exactly twice the native width, and
30102 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
30103 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
30104 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
30105 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
30106   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
30107 
30108   if (OpWidth == 64)
30109     return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
30110   if (OpWidth == 128)
30111     return Subtarget.canUseCMPXCHG16B();
30112 
30113   return false;
30114 }
30115 
30116 TargetLoweringBase::AtomicExpansionKind
30117 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
30118   Type *MemType = SI->getValueOperand()->getType();
30119 
30120   bool NoImplicitFloatOps =
30121       SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30122   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30123       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30124       (Subtarget.hasSSE1() || Subtarget.hasX87()))
30125     return AtomicExpansionKind::None;
30126 
30127   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
30128                                  : AtomicExpansionKind::None;
30129 }
30130 
30131 // Note: this turns large loads into lock cmpxchg8b/16b.
30132 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
30133 TargetLowering::AtomicExpansionKind
30134 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
30135   Type *MemType = LI->getType();
30136 
30137   // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
30138   // can use movq to do the load. If we have X87 we can load into an 80-bit
30139   // X87 register and store it to a stack temporary.
30140   bool NoImplicitFloatOps =
30141       LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30142   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30143       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30144       (Subtarget.hasSSE1() || Subtarget.hasX87()))
30145     return AtomicExpansionKind::None;
30146 
30147   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30148                                  : AtomicExpansionKind::None;
30149 }
30150 
30151 enum BitTestKind : unsigned {
30152   UndefBit,
30153   ConstantBit,
30154   NotConstantBit,
30155   ShiftBit,
30156   NotShiftBit
30157 };
30158 
30159 static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
30160   using namespace llvm::PatternMatch;
30161   BitTestKind BTK = UndefBit;
30162   auto *C = dyn_cast<ConstantInt>(V);
30163   if (C) {
30164     // Check if V is a power of 2 or the NOT of a power of 2.
30165     if (isPowerOf2_64(C->getZExtValue()))
30166       BTK = ConstantBit;
30167     else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
30168       BTK = NotConstantBit;
30169     return {V, BTK};
30170   }
30171 
30172   // Check if V is some power of 2 pattern known to be non-zero
30173   auto *I = dyn_cast<Instruction>(V);
30174   if (I) {
30175     bool Not = false;
30176     // Check if we have a NOT
30177     Value *PeekI;
30178     if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
30179         match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
30180       Not = true;
30181       I = dyn_cast<Instruction>(PeekI);
30182 
30183       // If I is constant, it will fold and we can evaluate later. If it's an
30184       // argument or something of that nature, we can't analyze.
30185       if (I == nullptr)
30186         return {nullptr, UndefBit};
30187     }
30188     // We can only use 1 << X without more sophisticated analysis. C << X where
30189     // C is a power of 2 but not 1 can result in zero which cannot be translated
30190     // to bittest. Likewise any C >> X (either arith or logical) can be zero.
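          // E.g. for i32, (2 << X) is 0 when X == 31, so it is not guaranteed to
          // have exactly one bit set; only (1 << X) is for every in-range X.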
30191     if (I->getOpcode() == Instruction::Shl) {
30192       // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
30193       // -X` and some other provable power of 2 patterns that we can use CTZ on
30194       // may be profitable.
30195       // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
30196       // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
30197       // be provably a non-zero power of 2.
30198       // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
30199       // transformable to bittest.
30200       auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
30201       if (!ShiftVal)
30202         return {nullptr, UndefBit};
30203       if (ShiftVal->equalsInt(1))
30204         BTK = Not ? NotShiftBit : ShiftBit;
30205 
30206       if (BTK == UndefBit)
30207         return {nullptr, UndefBit};
30208 
30209       Value *BitV = I->getOperand(1);
30210 
30211       Value *AndOp;
30212       const APInt *AndC;
30213       if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
30214         // Read past a shift-mask instruction to find the count.
30215         if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
30216           BitV = AndOp;
30217       }
30218       return {BitV, BTK};
30219     }
30220   }
30221   return {nullptr, UndefBit};
30222 }
30223 
30224 TargetLowering::AtomicExpansionKind
30225 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
30226   using namespace llvm::PatternMatch;
30227   // If the atomicrmw's result isn't actually used, we can just add a "lock"
30228   // prefix to a normal instruction for these operations.
30229   if (AI->use_empty())
30230     return AtomicExpansionKind::None;
30231 
30232   if (AI->getOperation() == AtomicRMWInst::Xor) {
30233     // A ^ SignBit -> A + SignBit. This allows us to use `xadd` which is
30234     // preferable to both `cmpxchg` and `btc`.
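          // XOR and ADD agree here because adding the sign bit can only carry out
          // of the top bit, and that carry is discarded, so both just flip the
          // MSB.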
30235     if (match(AI->getOperand(1), m_SignMask()))
30236       return AtomicExpansionKind::None;
30237   }
30238 
30239   // If the atomicrmw's result is used by a single-bit AND, we may use a
30240   // bts/btr/btc instruction for these operations.
30241   // Note: InstCombinePass can cause a de-optimization here. It replaces the
30242   // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
30243   // (depending on CC). This pattern can only use bts/btr/btc but we don't
30244   // detect it.
30245   Instruction *I = AI->user_back();
30246   auto BitChange = FindSingleBitChange(AI->getValOperand());
30247   if (BitChange.second == UndefBit || !AI->hasOneUse() ||
30248       I->getOpcode() != Instruction::And ||
30249       AI->getType()->getPrimitiveSizeInBits() == 8 ||
30250       AI->getParent() != I->getParent())
30251     return AtomicExpansionKind::CmpXChg;
30252 
30253   unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;
30254 
30255   // This is a redundant AND, it should get cleaned up elsewhere.
30256   if (AI == I->getOperand(OtherIdx))
30257     return AtomicExpansionKind::CmpXChg;
30258 
30259   // The following instruction must be an AND with a single bit.
30260   if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
30261     auto *C1 = cast<ConstantInt>(AI->getValOperand());
30262     auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
30263     if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
30264       return AtomicExpansionKind::CmpXChg;
30265     }
30266     if (AI->getOperation() == AtomicRMWInst::And) {
30267       return ~C1->getValue() == C2->getValue()
30268                  ? AtomicExpansionKind::BitTestIntrinsic
30269                  : AtomicExpansionKind::CmpXChg;
30270     }
30271     return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
30272                     : AtomicExpansionKind::CmpXChg;
30273   }
30274 
30275   assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);
30276 
30277   auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
30278   if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
30279     return AtomicExpansionKind::CmpXChg;
30280 
30281   assert(BitChange.first != nullptr && BitTested.first != nullptr);
30282 
30283   // If shift amounts are not the same we can't use BitTestIntrinsic.
30284   if (BitChange.first != BitTested.first)
30285     return AtomicExpansionKind::CmpXChg;
30286 
30287   // For an atomic AND we need to be masking all but one bit and testing the
30288   // one bit that is unset in the mask.
30289   if (AI->getOperation() == AtomicRMWInst::And)
30290     return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
30291                ? AtomicExpansionKind::BitTestIntrinsic
30292                : AtomicExpansionKind::CmpXChg;
30293 
30294   // For an atomic XOR/OR we need to be setting and testing the same bit.
30295   return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
30296              ? AtomicExpansionKind::BitTestIntrinsic
30297              : AtomicExpansionKind::CmpXChg;
30298 }
30299 
30300 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
30301   IRBuilder<> Builder(AI);
30302   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30303   Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
30304   Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
30305   switch (AI->getOperation()) {
30306   default:
30307     llvm_unreachable("Unknown atomic operation");
30308   case AtomicRMWInst::Or:
30309     IID_C = Intrinsic::x86_atomic_bts;
30310     IID_I = Intrinsic::x86_atomic_bts_rm;
30311     break;
30312   case AtomicRMWInst::Xor:
30313     IID_C = Intrinsic::x86_atomic_btc;
30314     IID_I = Intrinsic::x86_atomic_btc_rm;
30315     break;
30316   case AtomicRMWInst::And:
30317     IID_C = Intrinsic::x86_atomic_btr;
30318     IID_I = Intrinsic::x86_atomic_btr_rm;
30319     break;
30320   }
30321   Instruction *I = AI->user_back();
30322   LLVMContext &Ctx = AI->getContext();
30323   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30324                                           PointerType::getUnqual(Ctx));
30325   Function *BitTest = nullptr;
30326   Value *Result = nullptr;
30327   auto BitTested = FindSingleBitChange(AI->getValOperand());
30328   assert(BitTested.first != nullptr);
30329 
30330   if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
30331     auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
30332 
30333     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
30334 
30335     unsigned Imm = llvm::countr_zero(C->getZExtValue());
30336     Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
30337   } else {
30338     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
30339 
30340     assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
30341 
30342     Value *SI = BitTested.first;
30343     assert(SI != nullptr);
30344 
30345     // BT{S|R|C} on a memory operand doesn't modulo the bit position, so we
30346     // need to mask it ourselves.
30347     unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
30348     Value *BitPos =
30349         Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
30350     // TODO(1): In many cases it may be provable that SI is less than
30351     // ShiftBits, in which case this mask is unnecessary.
30352     // TODO(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP
30353     // (1 << (X % sizeof_bits(X))) we can drop the shift mask and AGEN in
30354     // favor of just a raw BT{S|R|C}.
30355 
30356     Result = Builder.CreateCall(BitTest, {Addr, BitPos});
30357     Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
30358 
30359     // If the result is only used for zero/non-zero status, then we don't need
30360     // to shift the value back. Otherwise do so.
30361     for (auto It = I->user_begin(); It != I->user_end(); ++It) {
30362       if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
30363         if (ICmp->isEquality()) {
30364           auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
30365           auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
30366           if (C0 || C1) {
30367             assert(C0 == nullptr || C1 == nullptr);
30368             if ((C0 ? C0 : C1)->isZero())
30369               continue;
30370           }
30371         }
30372       }
30373       Result = Builder.CreateShl(Result, BitPos);
30374       break;
30375     }
30376   }
30377 
30378   I->replaceAllUsesWith(Result);
30379   I->eraseFromParent();
30380   AI->eraseFromParent();
30381 }
30382 
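// Sketch of one representative pattern accepted below: with only the compare
// using the RMW result,
//   %negv = sub i32 0, %v
//   %old  = atomicrmw add ptr %p, i32 %v seq_cst
//   %cmp  = icmp eq i32 %old, %negv        ; i.e. (old + v) == 0
// can be lowered to a flag-producing locked add (see
// emitCmpArithAtomicRMWIntrinsic) instead of a cmpxchg loop.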
30383 static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
30384   using namespace llvm::PatternMatch;
30385   if (!AI->hasOneUse())
30386     return false;
30387 
30388   Value *Op = AI->getOperand(1);
30389   ICmpInst::Predicate Pred;
30390   Instruction *I = AI->user_back();
30391   AtomicRMWInst::BinOp Opc = AI->getOperation();
30392   if (Opc == AtomicRMWInst::Add) {
30393     if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
30394       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30395     if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
30396       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30397         return Pred == CmpInst::ICMP_SLT;
30398       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30399         return Pred == CmpInst::ICMP_SGT;
30400     }
30401     return false;
30402   }
30403   if (Opc == AtomicRMWInst::Sub) {
30404     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30405       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30406     if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
30407       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30408         return Pred == CmpInst::ICMP_SLT;
30409       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30410         return Pred == CmpInst::ICMP_SGT;
30411     }
30412     return false;
30413   }
30414   if ((Opc == AtomicRMWInst::Or &&
30415        match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
30416       (Opc == AtomicRMWInst::And &&
30417        match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
30418     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30419       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
30420              Pred == CmpInst::ICMP_SLT;
30421     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30422       return Pred == CmpInst::ICMP_SGT;
30423     return false;
30424   }
30425   if (Opc == AtomicRMWInst::Xor) {
30426     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30427       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30428     if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
30429       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30430         return Pred == CmpInst::ICMP_SLT;
30431       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30432         return Pred == CmpInst::ICMP_SGT;
30433     }
30434     return false;
30435   }
30436 
30437   return false;
30438 }
30439 
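// Illustrative end result: an 'atomicrmw sub' whose old value is only compared
// for equality against the subtracted operand can be selected as
//   lock subl %reg, (%mem)
//   sete  %cl
// (operand names are placeholders), since (old == v) is exactly (old - v == 0).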
30440 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
30441     AtomicRMWInst *AI) const {
30442   IRBuilder<> Builder(AI);
30443   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30444   Instruction *TempI = nullptr;
30445   LLVMContext &Ctx = AI->getContext();
30446   ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
30447   if (!ICI) {
30448     TempI = AI->user_back();
30449     assert(TempI->hasOneUse() && "Must have one use");
30450     ICI = cast<ICmpInst>(TempI->user_back());
30451   }
30452   X86::CondCode CC = X86::COND_INVALID;
30453   ICmpInst::Predicate Pred = ICI->getPredicate();
30454   switch (Pred) {
30455   default:
30456     llvm_unreachable("Not supported Pred");
30457   case CmpInst::ICMP_EQ:
30458     CC = X86::COND_E;
30459     break;
30460   case CmpInst::ICMP_NE:
30461     CC = X86::COND_NE;
30462     break;
30463   case CmpInst::ICMP_SLT:
30464     CC = X86::COND_S;
30465     break;
30466   case CmpInst::ICMP_SGT:
30467     CC = X86::COND_NS;
30468     break;
30469   }
30470   Intrinsic::ID IID = Intrinsic::not_intrinsic;
30471   switch (AI->getOperation()) {
30472   default:
30473     llvm_unreachable("Unknown atomic operation");
30474   case AtomicRMWInst::Add:
30475     IID = Intrinsic::x86_atomic_add_cc;
30476     break;
30477   case AtomicRMWInst::Sub:
30478     IID = Intrinsic::x86_atomic_sub_cc;
30479     break;
30480   case AtomicRMWInst::Or:
30481     IID = Intrinsic::x86_atomic_or_cc;
30482     break;
30483   case AtomicRMWInst::And:
30484     IID = Intrinsic::x86_atomic_and_cc;
30485     break;
30486   case AtomicRMWInst::Xor:
30487     IID = Intrinsic::x86_atomic_xor_cc;
30488     break;
30489   }
30490   Function *CmpArith =
30491       Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
30492   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30493                                           PointerType::getUnqual(Ctx));
30494   Value *Call = Builder.CreateCall(
30495       CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
30496   Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
30497   ICI->replaceAllUsesWith(Result);
30498   ICI->eraseFromParent();
30499   if (TempI)
30500     TempI->eraseFromParent();
30501   AI->eraseFromParent();
30502 }
30503 
30504 TargetLowering::AtomicExpansionKind
30505 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
30506   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30507   Type *MemType = AI->getType();
30508 
30509   // If the operand is too big, we must see if cmpxchg8/16b is available
30510   // and default to library calls otherwise.
30511   if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
30512     return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30513                                    : AtomicExpansionKind::None;
30514   }
30515 
30516   AtomicRMWInst::BinOp Op = AI->getOperation();
30517   switch (Op) {
30518   case AtomicRMWInst::Xchg:
30519     return AtomicExpansionKind::None;
30520   case AtomicRMWInst::Add:
30521   case AtomicRMWInst::Sub:
30522     if (shouldExpandCmpArithRMWInIR(AI))
30523       return AtomicExpansionKind::CmpArithIntrinsic;
30524     // It's better to use xadd, xsub or xchg for these in other cases.
30525     return AtomicExpansionKind::None;
30526   case AtomicRMWInst::Or:
30527   case AtomicRMWInst::And:
30528   case AtomicRMWInst::Xor:
30529     if (shouldExpandCmpArithRMWInIR(AI))
30530       return AtomicExpansionKind::CmpArithIntrinsic;
30531     return shouldExpandLogicAtomicRMWInIR(AI);
30532   case AtomicRMWInst::Nand:
30533   case AtomicRMWInst::Max:
30534   case AtomicRMWInst::Min:
30535   case AtomicRMWInst::UMax:
30536   case AtomicRMWInst::UMin:
30537   case AtomicRMWInst::FAdd:
30538   case AtomicRMWInst::FSub:
30539   case AtomicRMWInst::FMax:
30540   case AtomicRMWInst::FMin:
30541   case AtomicRMWInst::UIncWrap:
30542   case AtomicRMWInst::UDecWrap:
30543   default:
30544     // These always require a non-trivial set of data operations on x86. We must
30545     // use a cmpxchg loop.
30546     return AtomicExpansionKind::CmpXChg;
30547   }
30548 }
30549 
30550 LoadInst *
30551 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
30552   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30553   Type *MemType = AI->getType();
30554   // Accesses larger than the native width are turned into cmpxchg/libcalls, so
30555   // there is no benefit in turning such RMWs into loads, and it is actually
30556   // harmful as it introduces an mfence.
30557   if (MemType->getPrimitiveSizeInBits() > NativeWidth)
30558     return nullptr;
30559 
30560   // If this is a canonical idempotent atomicrmw w/no uses, we have a better
30561   // lowering available in lowerAtomicArith.
30562   // TODO: push more cases through this path.
30563   if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
30564     if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
30565         AI->use_empty())
30566       return nullptr;
30567 
30568   IRBuilder<> Builder(AI);
30569   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30570   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
30571   auto SSID = AI->getSyncScopeID();
30572   // We must restrict the ordering to avoid generating loads with Release or
30573   // ReleaseAcquire orderings.
30574   auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
30575 
30576   // Before the load we need a fence. Here is an example lifted from
30577   // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
30578   // is required:
30579   // Thread 0:
30580   //   x.store(1, relaxed);
30581   //   r1 = y.fetch_add(0, release);
30582   // Thread 1:
30583   //   y.fetch_add(42, acquire);
30584   //   r2 = x.load(relaxed);
30585   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
30586   // lowered to just a load without a fence. An mfence flushes the store
30587   // buffer, making the optimization clearly correct.
30588   // FIXME: the fence is required if isReleaseOrStronger(Order), but it is not
30589   // clear whether it is needed otherwise; we might be able to be more
30590   // aggressive on relaxed idempotent rmw. In practice, they do not look
30591   // useful, so we don't try to be especially clever.
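  // Illustratively, on a target with MFENCE this turns e.g.
  //   %old = atomicrmw add ptr %p, i32 0 acquire
  // into an 'mfence' followed by a plain atomic load of %p.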
30592   if (SSID == SyncScope::SingleThread)
30593     // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
30594     // the IR level, so we must wrap it in an intrinsic.
30595     return nullptr;
30596 
30597   if (!Subtarget.hasMFence())
30598     // FIXME: it might make sense to use a locked operation here but on a
30599     // different cache-line to prevent cache-line bouncing. In practice it
30600     // is probably a small win, and x86 processors without mfence are rare
30601     // enough that we do not bother.
30602     return nullptr;
30603 
30604   Function *MFence =
30605       llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
30606   Builder.CreateCall(MFence, {});
30607 
30608   // Finally we can emit the atomic load.
30609   LoadInst *Loaded = Builder.CreateAlignedLoad(
30610       AI->getType(), AI->getPointerOperand(), AI->getAlign());
30611   Loaded->setAtomic(Order, SSID);
30612   AI->replaceAllUsesWith(Loaded);
30613   AI->eraseFromParent();
30614   return Loaded;
30615 }
30616 
30617 /// Emit a locked operation on a stack location which does not change any
30618 /// memory location, but does involve a lock prefix.  Location is chosen to be
30619 /// a) very likely accessed only by a single thread to minimize cache traffic,
30620 /// and b) definitely dereferenceable.  Returns the new Chain result.
30621 static SDValue emitLockedStackOp(SelectionDAG &DAG,
30622                                  const X86Subtarget &Subtarget, SDValue Chain,
30623                                  const SDLoc &DL) {
30624   // Implementation notes:
30625   // 1) LOCK prefix creates a full read/write reordering barrier for memory
30626   // operations issued by the current processor.  As such, the location
30627   // referenced is not relevant for the ordering properties of the instruction.
30628   // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
30629   // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
30630   // 2) Using an immediate operand appears to be the best encoding choice
30631   // here since it doesn't require an extra register.
30632   // 3) OR appears to be very slightly faster than ADD. (Though, the difference
30633   // is small enough it might just be measurement noise.)
30634   // 4) When choosing offsets, there are several contributing factors:
30635   //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
30636   //      line aligned stack object to improve this case.)
30637   //   b) To minimize our chances of introducing a false dependence, we prefer
30638   //      to offset the stack usage from TOS slightly.
30639   //   c) To minimize concerns about cross thread stack usage - in particular,
30640   //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
30641   //      captures state in the TOS frame and accesses it from many threads -
30642   //      we want to use an offset such that the offset is in a distinct cache
30643   //      line from the TOS frame.
30644   //
30645   // For a general discussion of the tradeoffs and benchmark results, see:
30646   // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
30647 
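  // The emitted instruction is therefore of the form (illustrative, 64-bit
  // target with a red zone):  lock orl $0, -64(%rsp)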
30648   auto &MF = DAG.getMachineFunction();
30649   auto &TFL = *Subtarget.getFrameLowering();
30650   const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
30651 
30652   if (Subtarget.is64Bit()) {
30653     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30654     SDValue Ops[] = {
30655       DAG.getRegister(X86::RSP, MVT::i64),                  // Base
30656       DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
30657       DAG.getRegister(0, MVT::i64),                         // Index
30658       DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
30659       DAG.getRegister(0, MVT::i16),                         // Segment.
30660       Zero,
30661       Chain};
30662     SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30663                                      MVT::Other, Ops);
30664     return SDValue(Res, 1);
30665   }
30666 
30667   SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30668   SDValue Ops[] = {
30669     DAG.getRegister(X86::ESP, MVT::i32),            // Base
30670     DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
30671     DAG.getRegister(0, MVT::i32),                   // Index
30672     DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
30673     DAG.getRegister(0, MVT::i16),                   // Segment.
30674     Zero,
30675     Chain
30676   };
30677   SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30678                                    MVT::Other, Ops);
30679   return SDValue(Res, 1);
30680 }
30681 
30682 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
30683                                  SelectionDAG &DAG) {
30684   SDLoc dl(Op);
30685   AtomicOrdering FenceOrdering =
30686       static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
30687   SyncScope::ID FenceSSID =
30688       static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
30689 
30690   // The only fence that needs an instruction is a sequentially-consistent
30691   // cross-thread fence.
30692   if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
30693       FenceSSID == SyncScope::System) {
30694     if (Subtarget.hasMFence())
30695       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
30696 
30697     SDValue Chain = Op.getOperand(0);
30698     return emitLockedStackOp(DAG, Subtarget, Chain, dl);
30699   }
30700 
30701   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
30702   return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
30703 }
30704 
30705 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
30706                              SelectionDAG &DAG) {
30707   MVT T = Op.getSimpleValueType();
30708   SDLoc DL(Op);
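  // CMPXCHG implicitly uses {AL,AX,EAX,RAX} for the expected value and reports
  // success in ZF, which is read back below via a COND_E setcc. Roughly:
  //   movl %expected, %eax
  //   lock cmpxchgl %new, (%mem)
  //   sete %result
  // (operand names are placeholders).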
30709   unsigned Reg = 0;
30710   unsigned size = 0;
30711   switch(T.SimpleTy) {
30712   default: llvm_unreachable("Invalid value type!");
30713   case MVT::i8:  Reg = X86::AL;  size = 1; break;
30714   case MVT::i16: Reg = X86::AX;  size = 2; break;
30715   case MVT::i32: Reg = X86::EAX; size = 4; break;
30716   case MVT::i64:
30717     assert(Subtarget.is64Bit() && "Node not type legal!");
30718     Reg = X86::RAX; size = 8;
30719     break;
30720   }
30721   SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
30722                                   Op.getOperand(2), SDValue());
30723   SDValue Ops[] = { cpIn.getValue(0),
30724                     Op.getOperand(1),
30725                     Op.getOperand(3),
30726                     DAG.getTargetConstant(size, DL, MVT::i8),
30727                     cpIn.getValue(1) };
30728   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
30729   MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
30730   SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
30731                                            Ops, T, MMO);
30732 
30733   SDValue cpOut =
30734     DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
30735   SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
30736                                       MVT::i32, cpOut.getValue(2));
30737   SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
30738 
30739   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
30740                      cpOut, Success, EFLAGS.getValue(1));
30741 }
30742 
30743 // Create MOVMSKB, taking into account whether we need to split for AVX1.
30744 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
30745                            const X86Subtarget &Subtarget) {
30746   MVT InVT = V.getSimpleValueType();
30747 
30748   if (InVT == MVT::v64i8) {
30749     SDValue Lo, Hi;
30750     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30751     Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
30752     Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
30753     Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
30754     Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
30755     Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
30756                      DAG.getConstant(32, DL, MVT::i8));
30757     return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
30758   }
30759   if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
30760     SDValue Lo, Hi;
30761     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30762     Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
30763     Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
30764     Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
30765                      DAG.getConstant(16, DL, MVT::i8));
30766     return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
30767   }
30768 
30769   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
30770 }
30771 
30772 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
30773                             SelectionDAG &DAG) {
30774   SDValue Src = Op.getOperand(0);
30775   MVT SrcVT = Src.getSimpleValueType();
30776   MVT DstVT = Op.getSimpleValueType();
30777 
30778   // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
30779   // half to v32i1 and concatenating the result.
30780   if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
30781     assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
30782     assert(Subtarget.hasBWI() && "Expected BWI target");
30783     SDLoc dl(Op);
30784     SDValue Lo, Hi;
30785     std::tie(Lo, Hi) = DAG.SplitScalar(Src, dl, MVT::i32, MVT::i32);
30786     Lo = DAG.getBitcast(MVT::v32i1, Lo);
30787     Hi = DAG.getBitcast(MVT::v32i1, Hi);
30788     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
30789   }
30790 
30791   // Use MOVMSK for vector to scalar conversion to prevent scalarization.
30792   if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
30793     assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
30794     MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
30795     SDLoc DL(Op);
30796     SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
30797     V = getPMOVMSKB(DL, V, DAG, Subtarget);
30798     return DAG.getZExtOrTrunc(V, DL, DstVT);
30799   }
30800 
30801   assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
30802           SrcVT == MVT::i64) && "Unexpected VT!");
30803 
30804   assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
30805   if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
30806       !(DstVT == MVT::x86mmx && SrcVT.isVector()))
30807     // This conversion needs to be expanded.
30808     return SDValue();
30809 
30810   SDLoc dl(Op);
30811   if (SrcVT.isVector()) {
30812     // Widen the input vector in the case of MVT::v2i32.
30813     // Example: from MVT::v2i32 to MVT::v4i32.
30814     MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
30815                                  SrcVT.getVectorNumElements() * 2);
30816     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
30817                       DAG.getUNDEF(SrcVT));
30818   } else {
30819     assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
30820            "Unexpected source type in LowerBITCAST");
30821     Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
30822   }
30823 
30824   MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
30825   Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
30826 
30827   if (DstVT == MVT::x86mmx)
30828     return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
30829 
30830   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
30831                      DAG.getIntPtrConstant(0, dl));
30832 }
30833 
30834 /// Compute the horizontal sum of bytes in V for the elements of VT.
30835 ///
30836 /// Requires V to be a byte vector and VT to be an integer vector type with
30837 /// wider elements than V's type. The width of the elements of VT determines
30838 /// how many bytes of V are summed horizontally to produce each element of the
30839 /// result.
30840 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
30841                                       const X86Subtarget &Subtarget,
30842                                       SelectionDAG &DAG) {
30843   SDLoc DL(V);
30844   MVT ByteVecVT = V.getSimpleValueType();
30845   MVT EltVT = VT.getVectorElementType();
30846   assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
30847          "Expected value to have byte element type.");
30848   assert(EltVT != MVT::i8 &&
30849          "Horizontal byte sum only makes sense for wider elements!");
30850   unsigned VecSize = VT.getSizeInBits();
30851   assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
30852 
30853   // The PSADBW instruction horizontally adds all bytes and leaves the result
30854   // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
30855   if (EltVT == MVT::i64) {
30856     SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
30857     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30858     V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
30859     return DAG.getBitcast(VT, V);
30860   }
30861 
30862   if (EltVT == MVT::i32) {
30863     // We unpack the low half and high half into i32s interleaved with zeros so
30864     // that we can use PSADBW to horizontally sum them. The most useful part of
30865     // this is that it lines up the results of two PSADBW instructions to be
30866     // two v2i64 vectors which concatenated are the 4 population counts. We can
30867     // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
30868     SDValue Zeros = DAG.getConstant(0, DL, VT);
30869     SDValue V32 = DAG.getBitcast(VT, V);
30870     SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
30871     SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
30872 
30873     // Do the horizontal sums into two v2i64s.
30874     Zeros = DAG.getConstant(0, DL, ByteVecVT);
30875     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30876     Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30877                       DAG.getBitcast(ByteVecVT, Low), Zeros);
30878     High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30879                        DAG.getBitcast(ByteVecVT, High), Zeros);
30880 
30881     // Merge them together.
30882     MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
30883     V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
30884                     DAG.getBitcast(ShortVecVT, Low),
30885                     DAG.getBitcast(ShortVecVT, High));
30886 
30887     return DAG.getBitcast(VT, V);
30888   }
30889 
30890   // The only element type left is i16.
30891   assert(EltVT == MVT::i16 && "Unknown how to handle type");
30892 
30893   // To obtain pop count for each i16 element starting from the pop count for
30894   // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
30895   // right by 8. It is important to shift as i16s because an i8 vector shift
30896   // isn't directly supported.
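  // Worked example for one i16 lane holding per-byte pop counts [c0, c1] =
  // [3, 2] (i.e. 0x0203): the i16 shl-by-8 gives 0x0300, the byte-wise add
  // gives 0x0503, and the i16 srl-by-8 leaves 0x0005 = c0 + c1, the pop count
  // of the whole 16-bit element.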
30897   SDValue ShifterV = DAG.getConstant(8, DL, VT);
30898   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30899   V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
30900                   DAG.getBitcast(ByteVecVT, V));
30901   return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30902 }
30903 
30904 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
30905                                         const X86Subtarget &Subtarget,
30906                                         SelectionDAG &DAG) {
30907   MVT VT = Op.getSimpleValueType();
30908   MVT EltVT = VT.getVectorElementType();
30909   int NumElts = VT.getVectorNumElements();
30910   (void)EltVT;
30911   assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
30912 
30913   // Implement a lookup table in register by using an algorithm based on:
30914   // http://wm.ite.pl/articles/sse-popcount.html
30915   //
30916   // The general idea is that each nibble of every byte in the input vector is
30917   // an index into an in-register, pre-computed pop count table. We then split
30918   // the input vector into two new ones: (1) a vector with only the shifted-right
30919   // higher nibbles for each byte and (2) a vector with the lower nibbles (and
30920   // masked out higher ones) for each byte. PSHUFB is used separately with both
30921   // to index the in-register table. Next, both are added and the result is an
30922   // i8 vector where each element contains the pop count for its input byte.
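  // For example, the byte 0xE5 (0b11100101) has high nibble 0xE and low nibble
  // 0x5; the lookups give LUT[0xE] = 3 and LUT[0x5] = 2, whose sum 5 is indeed
  // popcount(0xE5).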
30923   const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
30924                        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
30925                        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
30926                        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
30927 
30928   SmallVector<SDValue, 64> LUTVec;
30929   for (int i = 0; i < NumElts; ++i)
30930     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
30931   SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
30932   SDValue M0F = DAG.getConstant(0x0F, DL, VT);
30933 
30934   // High nibbles
30935   SDValue FourV = DAG.getConstant(4, DL, VT);
30936   SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
30937 
30938   // Low nibbles
30939   SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
30940 
30941   // The input vector is used as the shuffle mask that index elements into the
30942   // LUT. After counting low and high nibbles, add the vector to obtain the
30943   // final pop count per i8 element.
30944   SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
30945   SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
30946   return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
30947 }
30948 
30949 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
30950 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
30951 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
30952                                 SelectionDAG &DAG) {
30953   MVT VT = Op.getSimpleValueType();
30954   assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
30955          "Unknown CTPOP type to handle");
30956   SDLoc DL(Op.getNode());
30957   SDValue Op0 = Op.getOperand(0);
30958 
30959   // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
30960   if (Subtarget.hasVPOPCNTDQ()) {
30961     unsigned NumElems = VT.getVectorNumElements();
30962     assert((VT.getVectorElementType() == MVT::i8 ||
30963             VT.getVectorElementType() == MVT::i16) && "Unexpected type");
30964     if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
30965       MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
30966       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
30967       Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
30968       return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
30969     }
30970   }
30971 
30972   // Decompose 256-bit ops into smaller 128-bit ops.
30973   if (VT.is256BitVector() && !Subtarget.hasInt256())
30974     return splitVectorIntUnary(Op, DAG);
30975 
30976   // Decompose 512-bit ops into smaller 256-bit ops.
30977   if (VT.is512BitVector() && !Subtarget.hasBWI())
30978     return splitVectorIntUnary(Op, DAG);
30979 
30980   // For element types greater than i8, do vXi8 pop counts and a bytesum.
30981   if (VT.getScalarType() != MVT::i8) {
30982     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
30983     SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
30984     SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
30985     return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
30986   }
30987 
30988   // We can't use the fast LUT approach, so fall back on LegalizeDAG.
30989   if (!Subtarget.hasSSSE3())
30990     return SDValue();
30991 
30992   return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
30993 }
30994 
30995 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
30996                           SelectionDAG &DAG) {
30997   assert(Op.getSimpleValueType().isVector() &&
30998          "We only do custom lowering for vector population count.");
30999   return LowerVectorCTPOP(Op, Subtarget, DAG);
31000 }
31001 
31002 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
31003   MVT VT = Op.getSimpleValueType();
31004   SDValue In = Op.getOperand(0);
31005   SDLoc DL(Op);
31006 
31007   // For scalars, it's still beneficial to transfer to/from the SIMD unit to
31008   // perform the BITREVERSE.
31009   if (!VT.isVector()) {
31010     MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
31011     SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
31012     Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
31013     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
31014                        DAG.getIntPtrConstant(0, DL));
31015   }
31016 
31017   int NumElts = VT.getVectorNumElements();
31018   int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
31019 
31020   // Decompose 256-bit ops into smaller 128-bit ops.
31021   if (VT.is256BitVector())
31022     return splitVectorIntUnary(Op, DAG);
31023 
31024   assert(VT.is128BitVector() &&
31025          "Only 128-bit vector bitreverse lowering supported.");
31026 
31027   // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
31028   // perform the BSWAP in the shuffle.
31029   // It's best to shuffle using the second operand, as this will implicitly allow
31030   // memory folding for multiple vectors.
31031   SmallVector<SDValue, 16> MaskElts;
31032   for (int i = 0; i != NumElts; ++i) {
31033     for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
31034       int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
31035       int PermuteByte = SourceByte | (2 << 5);
31036       MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
31037     }
31038   }
31039 
31040   SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
31041   SDValue Res = DAG.getBitcast(MVT::v16i8, In);
31042   Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
31043                     Res, Mask);
31044   return DAG.getBitcast(VT, Res);
31045 }
31046 
31047 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
31048                                SelectionDAG &DAG) {
31049   MVT VT = Op.getSimpleValueType();
31050 
31051   if (Subtarget.hasXOP() && !VT.is512BitVector())
31052     return LowerBITREVERSE_XOP(Op, DAG);
31053 
31054   assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
31055 
31056   SDValue In = Op.getOperand(0);
31057   SDLoc DL(Op);
31058 
31059   assert(VT.getScalarType() == MVT::i8 &&
31060          "Only byte vector BITREVERSE supported");
31061 
31062   // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
31063   if (VT == MVT::v64i8 && !Subtarget.hasBWI())
31064     return splitVectorIntUnary(Op, DAG);
31065 
31066   // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
31067   if (VT == MVT::v32i8 && !Subtarget.hasInt256())
31068     return splitVectorIntUnary(Op, DAG);
31069 
31070   unsigned NumElts = VT.getVectorNumElements();
31071 
31072   // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
31073   if (Subtarget.hasGFNI()) {
31074     MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
31075     SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
31076     Matrix = DAG.getBitcast(VT, Matrix);
31077     return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
31078                        DAG.getTargetConstant(0, DL, MVT::i8));
31079   }
31080 
31081   // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its two
31082   // nibbles, and a PSHUFB lookup finds the bitreverse of each 0-15 value
31083   // (moved to the other nibble).
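  // For example, for the input byte 0xB4 (0b10110100): the low nibble 0x4 maps
  // through LoLUT to 0x20 and the high nibble 0xB maps through HiLUT to 0x0D;
  // OR-ing them gives 0x2D (0b00101101) == bitreverse(0xB4).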
31084   SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
31085   SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
31086   SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
31087 
31088   const int LoLUT[16] = {
31089       /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
31090       /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
31091       /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
31092       /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
31093   const int HiLUT[16] = {
31094       /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
31095       /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
31096       /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
31097       /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
31098 
31099   SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
31100   for (unsigned i = 0; i < NumElts; ++i) {
31101     LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
31102     HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
31103   }
31104 
31105   SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
31106   SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
31107   Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
31108   Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
31109   return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31110 }
31111 
31112 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
31113                            SelectionDAG &DAG) {
31114   SDLoc DL(Op);
31115   SDValue X = Op.getOperand(0);
31116   MVT VT = Op.getSimpleValueType();
31117 
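  // On x86 the parity flag (PF) is computed only from the least-significant
  // byte of a result, so the strategy below is to xor-fold the input down to
  // 8 bits and then read PF with SETNP.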
31118   // Special case: if the input fits in 8 bits, we can use a single 8-bit TEST.
31119   if (VT == MVT::i8 ||
31120       DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
31121     X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31122     SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
31123                                 DAG.getConstant(0, DL, MVT::i8));
31124     // Copy the inverse of the parity flag into a register with setcc.
31125     SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31126     // Extend to the original type.
31127     return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31128   }
31129 
31130   // If we have POPCNT, use the default expansion.
31131   if (Subtarget.hasPOPCNT())
31132     return SDValue();
31133 
31134   if (VT == MVT::i64) {
31135     // Xor the high and low 32-bit halves together using a 32-bit operation.
31136     SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
31137                              DAG.getNode(ISD::SRL, DL, MVT::i64, X,
31138                                          DAG.getConstant(32, DL, MVT::i8)));
31139     SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
31140     X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
31141   }
31142 
31143   if (VT != MVT::i16) {
31144     // Xor the high and low 16-bits together using a 32-bit operation.
31145     SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
31146                                DAG.getConstant(16, DL, MVT::i8));
31147     X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
31148   } else {
31149     // If the input is 16-bits, we need to extend to use an i32 shift below.
31150     X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
31151   }
31152 
31153   // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
31154   // This should allow an h-reg to be used to save a shift.
31155   SDValue Hi = DAG.getNode(
31156       ISD::TRUNCATE, DL, MVT::i8,
31157       DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
31158   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31159   SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
31160   SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
31161 
31162   // Copy the inverse of the parity flag into a register with setcc.
31163   SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31164   // Extend to the original type.
31165   return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31166 }
31167 
31168 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
31169                                         const X86Subtarget &Subtarget) {
31170   unsigned NewOpc = 0;
31171   switch (N->getOpcode()) {
31172   case ISD::ATOMIC_LOAD_ADD:
31173     NewOpc = X86ISD::LADD;
31174     break;
31175   case ISD::ATOMIC_LOAD_SUB:
31176     NewOpc = X86ISD::LSUB;
31177     break;
31178   case ISD::ATOMIC_LOAD_OR:
31179     NewOpc = X86ISD::LOR;
31180     break;
31181   case ISD::ATOMIC_LOAD_XOR:
31182     NewOpc = X86ISD::LXOR;
31183     break;
31184   case ISD::ATOMIC_LOAD_AND:
31185     NewOpc = X86ISD::LAND;
31186     break;
31187   default:
31188     llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
31189   }
31190 
31191   MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
31192 
31193   return DAG.getMemIntrinsicNode(
31194       NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
31195       {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
31196       /*MemVT=*/N->getSimpleValueType(0), MMO);
31197 }
31198 
31199 /// Lower atomic_load_ops into LOCK-prefixed operations.
31200 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
31201                                 const X86Subtarget &Subtarget) {
31202   AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
31203   SDValue Chain = N->getOperand(0);
31204   SDValue LHS = N->getOperand(1);
31205   SDValue RHS = N->getOperand(2);
31206   unsigned Opc = N->getOpcode();
31207   MVT VT = N->getSimpleValueType(0);
31208   SDLoc DL(N);
31209 
31210   // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
31211   // can only be lowered when the result is unused.  They should have already
31212   // been transformed into a cmpxchg loop in AtomicExpand.
31213   if (N->hasAnyUseOfValue(0)) {
31214     // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
31215     // select LXADD if LOCK_SUB can't be selected.
31216     // Handle (atomic_load_xor p, SignBit) as (atomic_load_add p, SignBit) so we
31217     // can use LXADD as opposed to cmpxchg.
31218     if (Opc == ISD::ATOMIC_LOAD_SUB ||
31219         (Opc == ISD::ATOMIC_LOAD_XOR && isMinSignedConstant(RHS))) {
31220       RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
31221       return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, RHS,
31222                            AN->getMemOperand());
31223     }
31224     assert(Opc == ISD::ATOMIC_LOAD_ADD &&
31225            "Used AtomicRMW ops other than Add should have been expanded!");
31226     return N;
31227   }
31228 
31229   // Specialized lowering for the canonical form of an idempotent atomicrmw.
31230   // The core idea here is that since the memory location isn't actually
31231   // changing, all we need is a lowering for the *ordering* impacts of the
31232   // atomicrmw.  As such, we can choose a different operation and memory
31233   // location to minimize the impact on other code.
31234   // The above holds unless the node is marked volatile, in which
31235   // case it needs to be preserved according to the langref.
31236   if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS) && !AN->isVolatile()) {
31237     // On X86, the only ordering which actually requires an instruction is
31238     // seq_cst outside of SingleThread scope; everything else just needs to be
31239     // preserved during codegen and then dropped. Note that we expect (but don't
31240     // assume) that orderings other than seq_cst and acq_rel have been
31241     // canonicalized to a store or load.
31242     if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
31243         AN->getSyncScopeID() == SyncScope::System) {
31244       // Prefer a locked operation against a stack location to minimize cache
31245       // traffic.  This assumes that stack locations are very likely to be
31246       // accessed only by the owning thread.
31247       SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
31248       assert(!N->hasAnyUseOfValue(0));
31249       // NOTE: The getUNDEF is needed to give something for the unused result 0.
31250       return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31251                          DAG.getUNDEF(VT), NewChain);
31252     }
31253     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
31254     SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
31255     assert(!N->hasAnyUseOfValue(0));
31256     // NOTE: The getUNDEF is needed to give something for the unused result 0.
31257     return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31258                        DAG.getUNDEF(VT), NewChain);
31259   }
31260 
31261   SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
31262   // RAUW the chain, but don't worry about the result, as it's unused.
31263   assert(!N->hasAnyUseOfValue(0));
31264   // NOTE: The getUNDEF is needed to give something for the unused result 0.
31265   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31266                      DAG.getUNDEF(VT), LockOp.getValue(1));
31267 }
31268 
31269 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
31270                                  const X86Subtarget &Subtarget) {
31271   auto *Node = cast<AtomicSDNode>(Op.getNode());
31272   SDLoc dl(Node);
31273   EVT VT = Node->getMemoryVT();
31274 
31275   bool IsSeqCst =
31276       Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
31277   bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
31278 
31279   // If this store is not sequentially consistent and the type is legal
31280   // we can just keep it.
31281   if (!IsSeqCst && IsTypeLegal)
31282     return Op;
31283 
31284   if (VT == MVT::i64 && !IsTypeLegal) {
31285     // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
31286     // is enabled.
31287     bool NoImplicitFloatOps =
31288         DAG.getMachineFunction().getFunction().hasFnAttribute(
31289             Attribute::NoImplicitFloat);
31290     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
31291       SDValue Chain;
31292       if (Subtarget.hasSSE1()) {
31293         SDValue SclToVec =
31294             DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Node->getVal());
31295         MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
31296         SclToVec = DAG.getBitcast(StVT, SclToVec);
31297         SDVTList Tys = DAG.getVTList(MVT::Other);
31298         SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
31299         Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
31300                                         MVT::i64, Node->getMemOperand());
31301       } else if (Subtarget.hasX87()) {
31302         // First load this into an 80-bit X87 register using a stack temporary.
31303         // This will put the whole integer into the significand.
31304         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
31305         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
31306         MachinePointerInfo MPI =
31307             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
31308         Chain = DAG.getStore(Node->getChain(), dl, Node->getVal(), StackPtr,
31309                              MPI, MaybeAlign(), MachineMemOperand::MOStore);
31310         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
31311         SDValue LdOps[] = {Chain, StackPtr};
31312         SDValue Value = DAG.getMemIntrinsicNode(
31313             X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
31314             /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
31315         Chain = Value.getValue(1);
31316 
31317         // Now use an FIST to do the atomic store.
31318         SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
31319         Chain =
31320             DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
31321                                     StoreOps, MVT::i64, Node->getMemOperand());
31322       }
31323 
31324       if (Chain) {
31325         // If this is a sequentially consistent store, also emit an appropriate
31326         // barrier.
31327         if (IsSeqCst)
31328           Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
31329 
31330         return Chain;
31331       }
31332     }
31333   }
31334 
31335   // Convert seq_cst store -> xchg
31336   // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
31337   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
31338   SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, Node->getMemoryVT(),
31339                                Node->getOperand(0), Node->getOperand(2),
31340                                Node->getOperand(1), Node->getMemOperand());
31341   return Swap.getValue(1);
31342 }
31343 
31344 static SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {
31345   SDNode *N = Op.getNode();
31346   MVT VT = N->getSimpleValueType(0);
31347   unsigned Opc = Op.getOpcode();
31348 
31349   // Let legalize expand this if it isn't a legal type yet.
31350   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
31351     return SDValue();
31352 
31353   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
31354   SDLoc DL(N);
31355 
31356   // Set the carry flag.
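  // Adding all-ones to the incoming carry materializes it in CF: any non-zero
  // carry wraps around and sets CF, while a zero carry leaves CF clear.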
31357   SDValue Carry = Op.getOperand(2);
31358   EVT CarryVT = Carry.getValueType();
31359   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
31360                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
31361 
31362   bool IsAdd = Opc == ISD::UADDO_CARRY || Opc == ISD::SADDO_CARRY;
31363   SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
31364                             Op.getOperand(0), Op.getOperand(1),
31365                             Carry.getValue(1));
31366 
31367   bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
31368   SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
31369                            Sum.getValue(1), DL, DAG);
31370   if (N->getValueType(1) == MVT::i1)
31371     SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
31372 
31373   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
31374 }
31375 
31376 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
31377                             SelectionDAG &DAG) {
31378   assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
31379 
31380   // For MacOSX, we want to call an alternative entry point: __sincos_stret,
31381   // which returns the values as { float, float } (in XMM0) or
31382   // { double, double } (which is returned in XMM0, XMM1).
31383   SDLoc dl(Op);
31384   SDValue Arg = Op.getOperand(0);
31385   EVT ArgVT = Arg.getValueType();
31386   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
31387 
31388   TargetLowering::ArgListTy Args;
31389   TargetLowering::ArgListEntry Entry;
31390 
31391   Entry.Node = Arg;
31392   Entry.Ty = ArgTy;
31393   Entry.IsSExt = false;
31394   Entry.IsZExt = false;
31395   Args.push_back(Entry);
31396 
31397   bool isF64 = ArgVT == MVT::f64;
31398   // Only optimize x86_64 for now. i386 is a bit messy. For f32,
31399   // the small struct {f32, f32} is returned in (eax, edx). For f64,
31400   // the results are returned via SRet in memory.
31401   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31402   RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
31403   const char *LibcallName = TLI.getLibcallName(LC);
31404   SDValue Callee =
31405       DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
31406 
31407   Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
31408                       : (Type *)FixedVectorType::get(ArgTy, 4);
31409 
31410   TargetLowering::CallLoweringInfo CLI(DAG);
31411   CLI.setDebugLoc(dl)
31412       .setChain(DAG.getEntryNode())
31413       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
31414 
31415   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
31416 
31417   if (isF64)
31418     // Returned in xmm0 and xmm1.
31419     return CallResult.first;
31420 
31421   // Returned in bits 0:31 and 32:63 of xmm0.
31422   SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31423                                CallResult.first, DAG.getIntPtrConstant(0, dl));
31424   SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31425                                CallResult.first, DAG.getIntPtrConstant(1, dl));
31426   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
31427   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
31428 }
31429 
31430 /// Widen a vector input to a vector of NVT.  The
31431 /// input vector must have the same element type as NVT.
31432 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
31433                             bool FillWithZeroes = false) {
31434   // Check if InOp already has the right width.
31435   MVT InVT = InOp.getSimpleValueType();
31436   if (InVT == NVT)
31437     return InOp;
31438 
31439   if (InOp.isUndef())
31440     return DAG.getUNDEF(NVT);
31441 
31442   assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
31443          "input and widen element type must match");
31444 
31445   unsigned InNumElts = InVT.getVectorNumElements();
31446   unsigned WidenNumElts = NVT.getVectorNumElements();
31447   assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
31448          "Unexpected request for vector widening");
31449 
31450   SDLoc dl(InOp);
31451   if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
31452       InOp.getNumOperands() == 2) {
31453     SDValue N1 = InOp.getOperand(1);
31454     if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
31455         N1.isUndef()) {
31456       InOp = InOp.getOperand(0);
31457       InVT = InOp.getSimpleValueType();
31458       InNumElts = InVT.getVectorNumElements();
31459     }
31460   }
31461   if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
31462       ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
31463     SmallVector<SDValue, 16> Ops;
31464     for (unsigned i = 0; i < InNumElts; ++i)
31465       Ops.push_back(InOp.getOperand(i));
31466 
31467     EVT EltVT = InOp.getOperand(0).getValueType();
31468 
31469     SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
31470       DAG.getUNDEF(EltVT);
31471     for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
31472       Ops.push_back(FillVal);
31473     return DAG.getBuildVector(NVT, dl, Ops);
31474   }
31475   SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
31476     DAG.getUNDEF(NVT);
31477   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
31478                      InOp, DAG.getIntPtrConstant(0, dl));
31479 }
31480 
31481 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
31482                              SelectionDAG &DAG) {
31483   assert(Subtarget.hasAVX512() &&
31484          "MGATHER/MSCATTER are supported on AVX-512 arch only");
31485 
31486   MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
31487   SDValue Src = N->getValue();
31488   MVT VT = Src.getSimpleValueType();
31489   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
31490   SDLoc dl(Op);
31491 
31492   SDValue Scale = N->getScale();
31493   SDValue Index = N->getIndex();
31494   SDValue Mask = N->getMask();
31495   SDValue Chain = N->getChain();
31496   SDValue BasePtr = N->getBasePtr();
31497 
31498   if (VT == MVT::v2f32 || VT == MVT::v2i32) {
31499     assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
31500     // If the index is v2i64 and we have VLX we can use xmm for data and index.
31501     if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
31502       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31503       EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
31504       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
31505       SDVTList VTs = DAG.getVTList(MVT::Other);
31506       SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31507       return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31508                                      N->getMemoryVT(), N->getMemOperand());
31509     }
31510     return SDValue();
31511   }
31512 
31513   MVT IndexVT = Index.getSimpleValueType();
31514 
31515   // If the index is v2i32, we're being called by type legalization and we
31516   // should just let the default handling take care of it.
31517   if (IndexVT == MVT::v2i32)
31518     return SDValue();
31519 
31520   // If we don't have VLX and neither the source data nor the index is 512 bits,
31521   // we need to widen until one is.
31522   if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
31523       !Index.getSimpleValueType().is512BitVector()) {
31524     // Determine how much we need to widen by to get a 512-bit type.
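              // E.g. a v4i32 scatter with a v4i64 index widens by a factor of 2 to a
              // v8i32 data vector and a v8i64 (512-bit) index with a v8i1 mask.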
31525     unsigned Factor = std::min(512/VT.getSizeInBits(),
31526                                512/IndexVT.getSizeInBits());
31527     unsigned NumElts = VT.getVectorNumElements() * Factor;
31528 
31529     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31530     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31531     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31532 
31533     Src = ExtendToType(Src, VT, DAG);
31534     Index = ExtendToType(Index, IndexVT, DAG);
31535     Mask = ExtendToType(Mask, MaskVT, DAG, true);
31536   }
31537 
31538   SDVTList VTs = DAG.getVTList(MVT::Other);
31539   SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31540   return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31541                                  N->getMemoryVT(), N->getMemOperand());
31542 }
31543 
31544 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
31545                           SelectionDAG &DAG) {
31546 
31547   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
31548   MVT VT = Op.getSimpleValueType();
31549   MVT ScalarVT = VT.getScalarType();
31550   SDValue Mask = N->getMask();
31551   MVT MaskVT = Mask.getSimpleValueType();
31552   SDValue PassThru = N->getPassThru();
31553   SDLoc dl(Op);
31554 
31555   // Handle AVX masked loads which don't support passthru values other than 0.
31556   if (MaskVT.getVectorElementType() != MVT::i1) {
31557     // We also allow undef in the isel pattern.
31558     if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
31559       return Op;
31560 
31561     SDValue NewLoad = DAG.getMaskedLoad(
31562         VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31563         getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
31564         N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
31565         N->isExpandingLoad());
31566     // Emit a blend.
31567     SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
31568     return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
31569   }
31570 
31571   assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
31572          "Expanding masked load is supported on AVX-512 target only!");
31573 
31574   assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
31575          "Expanding masked load is supported for 32 and 64-bit types only!");
31576 
31577   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31578          "Cannot lower masked load op.");
31579 
31580   assert((ScalarVT.getSizeInBits() >= 32 ||
31581           (Subtarget.hasBWI() &&
31582               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31583          "Unsupported masked load op.");
31584 
31585   // This operation is legal for targets with VLX, but without
31586   // VLX the vector should be widened to 512 bits.
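        // E.g. a v8i32 masked load is widened to a v16i32 load with a v16i1 mask,
        // and the original v8i32 result is extracted from the low half below.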
31587   unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
31588   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31589   PassThru = ExtendToType(PassThru, WideDataVT, DAG);
31590 
31591   // Mask element has to be i1.
31592   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31593          "Unexpected mask type");
31594 
31595   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31596 
31597   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31598   SDValue NewLoad = DAG.getMaskedLoad(
31599       WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31600       PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
31601       N->getExtensionType(), N->isExpandingLoad());
31602 
31603   SDValue Extract =
31604       DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
31605                   DAG.getIntPtrConstant(0, dl));
31606   SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
31607   return DAG.getMergeValues(RetOps, dl);
31608 }
31609 
31610 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
31611                            SelectionDAG &DAG) {
31612   MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
31613   SDValue DataToStore = N->getValue();
31614   MVT VT = DataToStore.getSimpleValueType();
31615   MVT ScalarVT = VT.getScalarType();
31616   SDValue Mask = N->getMask();
31617   SDLoc dl(Op);
31618 
31619   assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
31620          "Compressing masked store is supported on AVX-512 target only!");
31621 
31622   assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
31623          "Compressing masked store is supported for 32 and 64-bit types only!");
31624 
31625   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31626          "Cannot lower masked store op.");
31627 
31628   assert((ScalarVT.getSizeInBits() >= 32 ||
31629           (Subtarget.hasBWI() &&
31630               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31631           "Unsupported masked store op.");
31632 
31633   // This operation is legal for targets with VLX, but without
31634   // VLX the vector should be widened to 512 bits.
31635   unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
31636   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31637 
31638   // Mask element has to be i1.
31639   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31640          "Unexpected mask type");
31641 
31642   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31643 
31644   DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
31645   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31646   return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
31647                             N->getOffset(), Mask, N->getMemoryVT(),
31648                             N->getMemOperand(), N->getAddressingMode(),
31649                             N->isTruncatingStore(), N->isCompressingStore());
31650 }
31651 
31652 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
31653                             SelectionDAG &DAG) {
31654   assert(Subtarget.hasAVX2() &&
31655          "MGATHER/MSCATTER are supported on AVX2/AVX-512 arch only");
31656 
31657   MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
31658   SDLoc dl(Op);
31659   MVT VT = Op.getSimpleValueType();
31660   SDValue Index = N->getIndex();
31661   SDValue Mask = N->getMask();
31662   SDValue PassThru = N->getPassThru();
31663   MVT IndexVT = Index.getSimpleValueType();
31664 
31665   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
31666 
31667   // If the index is v2i32, we're being called by type legalization.
31668   if (IndexVT == MVT::v2i32)
31669     return SDValue();
31670 
31671   // If we don't have VLX and neither the passthru nor the index is 512 bits,
31672   // we need to widen until one is.
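        // Remember the original result type so the widened gather result can be
        // shrunk back down to it after the node is created.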
31673   MVT OrigVT = VT;
31674   if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31675       !IndexVT.is512BitVector()) {
31676     // Determine how much we need to widen by to get a 512-bit type.
31677     unsigned Factor = std::min(512/VT.getSizeInBits(),
31678                                512/IndexVT.getSizeInBits());
31679 
31680     unsigned NumElts = VT.getVectorNumElements() * Factor;
31681 
31682     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31683     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31684     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31685 
31686     PassThru = ExtendToType(PassThru, VT, DAG);
31687     Index = ExtendToType(Index, IndexVT, DAG);
31688     Mask = ExtendToType(Mask, MaskVT, DAG, true);
31689   }
31690 
31691   // Break dependency on the data register.
31692   if (PassThru.isUndef())
31693     PassThru = getZeroVector(VT, Subtarget, DAG, dl);
31694 
31695   SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
31696                     N->getScale() };
31697   SDValue NewGather = DAG.getMemIntrinsicNode(
31698       X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
31699       N->getMemOperand());
31700   SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
31701                                 NewGather, DAG.getIntPtrConstant(0, dl));
31702   return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
31703 }
31704 
31705 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
31706   SDLoc dl(Op);
31707   SDValue Src = Op.getOperand(0);
31708   MVT DstVT = Op.getSimpleValueType();
31709 
31710   AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
31711   unsigned SrcAS = N->getSrcAddressSpace();
31712 
31713   assert(SrcAS != N->getDestAddressSpace() &&
31714          "addrspacecast must be between different address spaces");
31715 
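        // Pointers in the 32-bit unsigned address space zero-extend when cast to a
        // 64-bit pointer, other 32-bit pointers sign-extend, and 64-bit pointers
        // truncate when cast to a 32-bit address space.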
31716   if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
31717     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
31718   } else if (DstVT == MVT::i64) {
31719     Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
31720   } else if (DstVT == MVT::i32) {
31721     Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
31722   } else {
31723     report_fatal_error("Bad address space in addrspacecast");
31724   }
31725   return Op;
31726 }
31727 
31728 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
31729                                               SelectionDAG &DAG) const {
31730   // TODO: Eventually, the lowering of these nodes should be informed by or
31731   // deferred to the GC strategy for the function in which they appear. For
31732   // now, however, they must be lowered to something. Since they are logically
31733   // no-ops in the case of a null GC strategy (or a GC strategy which does not
31734   // require special handling for these nodes), lower them as literal NOOPs for
31735   // the time being.
31736   SmallVector<SDValue, 2> Ops;
31737   Ops.push_back(Op.getOperand(0));
31738   if (Op->getGluedNode())
31739     Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
31740 
31741   SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
31742   return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
31743 }
31744 
31745 // Custom split CVTPS2PH with wide types.
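      // E.g. a v16i16 result splits the v16f32 input into two v8f32 halves,
      // converts each half with CVTPS2PH, and concatenates the two v8i16 results.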
31746 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
31747   SDLoc dl(Op);
31748   EVT VT = Op.getValueType();
31749   SDValue Lo, Hi;
31750   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
31751   EVT LoVT, HiVT;
31752   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
31753   SDValue RC = Op.getOperand(1);
31754   Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
31755   Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
31756   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
31757 }
31758 
31759 static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
31760                              SelectionDAG &DAG) {
31761   unsigned IsData = Op.getConstantOperandVal(4);
31762 
31763   // We don't support non-data prefetch without PREFETCHI.
31764   // Just preserve the chain.
31765   if (!IsData && !Subtarget.hasPREFETCHI())
31766     return Op.getOperand(0);
31767 
31768   return Op;
31769 }
31770 
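      // Scan the inline-asm strings for the one that references operand OpNo
      // (written as " $N" or "${N:...}") and return its leading instruction
      // mnemonic (e.g. "call"), or an empty StringRef if none is found.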
31771 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
31772                                      unsigned OpNo) {
31773   const APInt Operand(32, OpNo);
31774   std::string OpNoStr = llvm::toString(Operand, 10, false);
31775   std::string Str(" $");
31776 
31777   std::string OpNoStr1(Str + OpNoStr);             // e.g. " $1" (OpNo=1)
31778   std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
31779 
31780   auto I = StringRef::npos;
31781   for (auto &AsmStr : AsmStrs) {
31782     // Match the OpNo string. We must match exactly to avoid matching a
31783     // sub-string, e.g. "$12" contains "$1".
31784     if (AsmStr.ends_with(OpNoStr1))
31785       I = AsmStr.size() - OpNoStr1.size();
31786 
31787     // Get the index of operand in AsmStr.
31788     if (I == StringRef::npos)
31789       I = AsmStr.find(OpNoStr1 + ",");
31790     if (I == StringRef::npos)
31791       I = AsmStr.find(OpNoStr2);
31792 
31793     if (I == StringRef::npos)
31794       continue;
31795 
31796     assert(I > 0 && "Unexpected inline asm string!");
31797     // Remove the operand string and label (if it exists).
31798     // For example:
31799     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
31800     // ==>
31801     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
31802     // ==>
31803     // "call dword ptr "
31804     auto TmpStr = AsmStr.substr(0, I);
31805     I = TmpStr.rfind(':');
31806     if (I != StringRef::npos)
31807       TmpStr = TmpStr.substr(I + 1);
31808     return TmpStr.take_while(llvm::isAlpha);
31809   }
31810 
31811   return StringRef();
31812 }
31813 
31814 bool X86TargetLowering::isInlineAsmTargetBranch(
31815     const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
31816   // In an __asm block, "__asm inst foo" where inst is CALL or JMP should be
31817   // changed from the indirect TargetLowering::C_Memory constraint to the
31818   // direct TargetLowering::C_Address constraint.
31819   // We don't need to special case LOOP* and Jcc, which cannot target a memory
31820   // location.
31821   StringRef Inst = getInstrStrFromOpNo(AsmStrs, OpNo);
31822   return Inst.equals_insensitive("call") || Inst.equals_insensitive("jmp");
31823 }
31824 
31825 /// Provide custom lowering hooks for some operations.
31826 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
31827   switch (Op.getOpcode()) {
31828   default: llvm_unreachable("Should not custom lower this!");
31829   case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
31830   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
31831     return LowerCMP_SWAP(Op, Subtarget, DAG);
31832   case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
31833   case ISD::ATOMIC_LOAD_ADD:
31834   case ISD::ATOMIC_LOAD_SUB:
31835   case ISD::ATOMIC_LOAD_OR:
31836   case ISD::ATOMIC_LOAD_XOR:
31837   case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
31838   case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
31839   case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
31840   case ISD::PARITY:             return LowerPARITY(Op, Subtarget, DAG);
31841   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
31842   case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
31843   case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
31844   case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
31845   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
31846   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
31847   case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
31848   case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
31849   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
31850   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
31851   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
31852   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
31853   case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
31854   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
31855   case ISD::SHL_PARTS:
31856   case ISD::SRA_PARTS:
31857   case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
31858   case ISD::FSHL:
31859   case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
31860   case ISD::STRICT_SINT_TO_FP:
31861   case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
31862   case ISD::STRICT_UINT_TO_FP:
31863   case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
31864   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
31865   case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
31866   case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
31867   case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
31868   case ISD::ZERO_EXTEND_VECTOR_INREG:
31869   case ISD::SIGN_EXTEND_VECTOR_INREG:
31870     return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
31871   case ISD::FP_TO_SINT:
31872   case ISD::STRICT_FP_TO_SINT:
31873   case ISD::FP_TO_UINT:
31874   case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
31875   case ISD::FP_TO_SINT_SAT:
31876   case ISD::FP_TO_UINT_SAT:     return LowerFP_TO_INT_SAT(Op, DAG);
31877   case ISD::FP_EXTEND:
31878   case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
31879   case ISD::FP_ROUND:
31880   case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
31881   case ISD::FP16_TO_FP:
31882   case ISD::STRICT_FP16_TO_FP:  return LowerFP16_TO_FP(Op, DAG);
31883   case ISD::FP_TO_FP16:
31884   case ISD::STRICT_FP_TO_FP16:  return LowerFP_TO_FP16(Op, DAG);
31885   case ISD::FP_TO_BF16:         return LowerFP_TO_BF16(Op, DAG);
31886   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
31887   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
31888   case ISD::FADD:
31889   case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
31890   case ISD::FROUND:             return LowerFROUND(Op, DAG);
31891   case ISD::FABS:
31892   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
31893   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
31894   case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
31895   case ISD::LRINT:
31896   case ISD::LLRINT:             return LowerLRINT_LLRINT(Op, DAG);
31897   case ISD::SETCC:
31898   case ISD::STRICT_FSETCC:
31899   case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
31900   case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
31901   case ISD::SELECT:             return LowerSELECT(Op, DAG);
31902   case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
31903   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
31904   case ISD::VASTART:            return LowerVASTART(Op, DAG);
31905   case ISD::VAARG:              return LowerVAARG(Op, DAG);
31906   case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
31907   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
31908   case ISD::INTRINSIC_VOID:
31909   case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
31910   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
31911   case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
31912   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
31913   case ISD::FRAME_TO_ARGS_OFFSET:
31914                                 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
31915   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
31916   case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
31917   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
31918   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
31919   case ISD::EH_SJLJ_SETUP_DISPATCH:
31920     return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
31921   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
31922   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
31923   case ISD::GET_ROUNDING:       return LowerGET_ROUNDING(Op, DAG);
31924   case ISD::SET_ROUNDING:       return LowerSET_ROUNDING(Op, DAG);
31925   case ISD::GET_FPENV_MEM:      return LowerGET_FPENV_MEM(Op, DAG);
31926   case ISD::SET_FPENV_MEM:      return LowerSET_FPENV_MEM(Op, DAG);
31927   case ISD::RESET_FPENV:        return LowerRESET_FPENV(Op, DAG);
31928   case ISD::CTLZ:
31929   case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
31930   case ISD::CTTZ:
31931   case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
31932   case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
31933   case ISD::MULHS:
31934   case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
31935   case ISD::ROTL:
31936   case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
31937   case ISD::SRA:
31938   case ISD::SRL:
31939   case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
31940   case ISD::SADDO:
31941   case ISD::UADDO:
31942   case ISD::SSUBO:
31943   case ISD::USUBO:              return LowerXALUO(Op, DAG);
31944   case ISD::SMULO:
31945   case ISD::UMULO:              return LowerMULO(Op, Subtarget, DAG);
31946   case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
31947   case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
31948   case ISD::SADDO_CARRY:
31949   case ISD::SSUBO_CARRY:
31950   case ISD::UADDO_CARRY:
31951   case ISD::USUBO_CARRY:        return LowerADDSUBO_CARRY(Op, DAG);
31952   case ISD::ADD:
31953   case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
31954   case ISD::UADDSAT:
31955   case ISD::SADDSAT:
31956   case ISD::USUBSAT:
31957   case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
31958   case ISD::SMAX:
31959   case ISD::SMIN:
31960   case ISD::UMAX:
31961   case ISD::UMIN:               return LowerMINMAX(Op, Subtarget, DAG);
31962   case ISD::FMINIMUM:
31963   case ISD::FMAXIMUM:
31964     return LowerFMINIMUM_FMAXIMUM(Op, Subtarget, DAG);
31965   case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
31966   case ISD::ABDS:
31967   case ISD::ABDU:               return LowerABD(Op, Subtarget, DAG);
31968   case ISD::AVGCEILU:           return LowerAVG(Op, Subtarget, DAG);
31969   case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
31970   case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
31971   case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
31972   case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
31973   case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
31974   case ISD::GC_TRANSITION_START:
31975   case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
31976   case ISD::ADDRSPACECAST:      return LowerADDRSPACECAST(Op, DAG);
31977   case X86ISD::CVTPS2PH:        return LowerCVTPS2PH(Op, DAG);
31978   case ISD::PREFETCH:           return LowerPREFETCH(Op, Subtarget, DAG);
31979   }
31980 }
31981 
31982 /// Replace a node with an illegal result type with a new node built out of
31983 /// custom code.
31984 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
31985                                            SmallVectorImpl<SDValue>&Results,
31986                                            SelectionDAG &DAG) const {
31987   SDLoc dl(N);
31988   switch (N->getOpcode()) {
31989   default:
31990 #ifndef NDEBUG
31991     dbgs() << "ReplaceNodeResults: ";
31992     N->dump(&DAG);
31993 #endif
31994     llvm_unreachable("Do not know how to custom type legalize this operation!");
31995   case X86ISD::CVTPH2PS: {
31996     EVT VT = N->getValueType(0);
31997     SDValue Lo, Hi;
31998     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
31999     EVT LoVT, HiVT;
32000     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32001     Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
32002     Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
32003     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32004     Results.push_back(Res);
32005     return;
32006   }
32007   case X86ISD::STRICT_CVTPH2PS: {
32008     EVT VT = N->getValueType(0);
32009     SDValue Lo, Hi;
32010     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
32011     EVT LoVT, HiVT;
32012     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32013     Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
32014                      {N->getOperand(0), Lo});
32015     Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
32016                      {N->getOperand(0), Hi});
32017     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32018                                 Lo.getValue(1), Hi.getValue(1));
32019     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32020     Results.push_back(Res);
32021     Results.push_back(Chain);
32022     return;
32023   }
32024   case X86ISD::CVTPS2PH:
32025     Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
32026     return;
32027   case ISD::CTPOP: {
32028     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32029     // Use a v2i64 if possible.
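          // We only get here when i64 itself is illegal (32-bit targets); if v2i64
          // is legal, route the scalar popcount through the vector CTPOP lowering.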
32030     bool NoImplicitFloatOps =
32031         DAG.getMachineFunction().getFunction().hasFnAttribute(
32032             Attribute::NoImplicitFloat);
32033     if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
32034       SDValue Wide =
32035           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
32036       Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
32037       // The bit count fits in 32 bits; extract it as i32 and then zero
32038       // extend to i64. Otherwise we end up extracting bits 63:32 separately.
32039       Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
32040       Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
32041                          DAG.getIntPtrConstant(0, dl));
32042       Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
32043       Results.push_back(Wide);
32044     }
32045     return;
32046   }
32047   case ISD::MUL: {
32048     EVT VT = N->getValueType(0);
32049     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32050            VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
32051     // Pre-promote these to vXi16 to avoid op legalization thinking all 16
32052     // elements are needed.
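          // E.g. a v2i8 multiply becomes a v2i16 ANY_EXTEND + MUL + TRUNCATE back
          // to v2i8, and the result is then padded out to v16i8 with undef parts.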
32053     MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
32054     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
32055     SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
32056     SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
32057     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32058     unsigned NumConcats = 16 / VT.getVectorNumElements();
32059     SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32060     ConcatOps[0] = Res;
32061     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
32062     Results.push_back(Res);
32063     return;
32064   }
32065   case ISD::SMULO:
32066   case ISD::UMULO: {
32067     EVT VT = N->getValueType(0);
32068     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32069            VT == MVT::v2i32 && "Unexpected VT!");
32070     bool IsSigned = N->getOpcode() == ISD::SMULO;
32071     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
32072     SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
32073     SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
32074     SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
32075     // Extract the high 32 bits from each result using PSHUFD.
32076     // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
32077     SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
32078     Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
32079     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
32080                      DAG.getIntPtrConstant(0, dl));
32081 
32082     // Truncate the low bits of the result. This will become PSHUFD.
32083     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32084 
32085     SDValue HiCmp;
32086     if (IsSigned) {
32087       // SMULO overflows if the high bits don't match the sign of the low.
32088       HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
32089     } else {
32090       // UMULO overflows if the high bits are non-zero.
32091       HiCmp = DAG.getConstant(0, dl, VT);
32092     }
32093     SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
32094 
32095     // Widen the result by padding with undef.
32096     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32097                       DAG.getUNDEF(VT));
32098     Results.push_back(Res);
32099     Results.push_back(Ovf);
32100     return;
32101   }
32102   case X86ISD::VPMADDWD: {
32103     // Legalize types for X86ISD::VPMADDWD by widening.
32104     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32105 
32106     EVT VT = N->getValueType(0);
32107     EVT InVT = N->getOperand(0).getValueType();
32108     assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
32109            "Expected a VT that divides into 128 bits.");
32110     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32111            "Unexpected type action!");
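          // E.g. a v2i32 = VPMADDWD v4i16, v4i16 widens to v4i32 = VPMADDWD v8i16,
          // v8i16 with the extra input elements left undef.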
32112     unsigned NumConcat = 128 / InVT.getSizeInBits();
32113 
32114     EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
32115                                     InVT.getVectorElementType(),
32116                                     NumConcat * InVT.getVectorNumElements());
32117     EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
32118                                   VT.getVectorElementType(),
32119                                   NumConcat * VT.getVectorNumElements());
32120 
32121     SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
32122     Ops[0] = N->getOperand(0);
32123     SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32124     Ops[0] = N->getOperand(1);
32125     SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32126 
32127     SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
32128     Results.push_back(Res);
32129     return;
32130   }
32131   // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
32132   case X86ISD::FMINC:
32133   case X86ISD::FMIN:
32134   case X86ISD::FMAXC:
32135   case X86ISD::FMAX: {
32136     EVT VT = N->getValueType(0);
32137     assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
32138     SDValue UNDEF = DAG.getUNDEF(VT);
32139     SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32140                               N->getOperand(0), UNDEF);
32141     SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32142                               N->getOperand(1), UNDEF);
32143     Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
32144     return;
32145   }
32146   case ISD::SDIV:
32147   case ISD::UDIV:
32148   case ISD::SREM:
32149   case ISD::UREM: {
32150     EVT VT = N->getValueType(0);
32151     if (VT.isVector()) {
32152       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32153              "Unexpected type action!");
32154       // If the RHS is a constant splat vector we can widen this and let
32155       // division/remainder by constant optimize it.
32156       // TODO: Can we do something for non-splat?
32157       APInt SplatVal;
32158       if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
32159         unsigned NumConcats = 128 / VT.getSizeInBits();
32160         SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
32161         Ops0[0] = N->getOperand(0);
32162         EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
32163         SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
32164         SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
32165         SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
32166         Results.push_back(Res);
32167       }
32168       return;
32169     }
32170 
32171     SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
32172     Results.push_back(V);
32173     return;
32174   }
32175   case ISD::TRUNCATE: {
32176     MVT VT = N->getSimpleValueType(0);
32177     if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
32178       return;
32179 
32180     // The generic legalizer will try to widen the input type to the same
32181     // number of elements as the widened result type. But this isn't always
32182     // the best thing so do some custom legalization to avoid some cases.
32183     MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
32184     SDValue In = N->getOperand(0);
32185     EVT InVT = In.getValueType();
32186     EVT InEltVT = InVT.getVectorElementType();
32187     EVT EltVT = VT.getVectorElementType();
32188     unsigned MinElts = VT.getVectorNumElements();
32189     unsigned WidenNumElts = WidenVT.getVectorNumElements();
32190     unsigned InBits = InVT.getSizeInBits();
32191 
32192     // See if there are sufficient leading bits to perform a PACKUS/PACKSS.
32193     unsigned PackOpcode;
32194     if (SDValue Src =
32195             matchTruncateWithPACK(PackOpcode, VT, In, dl, DAG, Subtarget)) {
32196       if (SDValue Res = truncateVectorWithPACK(PackOpcode, VT, Src,
32197                                                dl, DAG, Subtarget)) {
32198         Res = widenSubVector(WidenVT, Res, false, Subtarget, DAG, dl);
32199         Results.push_back(Res);
32200         return;
32201       }
32202     }
32203 
32204     if (128 % InBits == 0) {
32205       // 128-bit and smaller inputs should avoid the truncate altogether and
32206       // just use a build_vector that will become a shuffle.
32207       // TODO: Widen and use a shuffle directly?
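            // E.g. v2i64 -> v2i8: extract both i64 elements, truncate each scalar
            // to i8, and build a v16i8 whose remaining lanes are undef.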
32208       SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
32209       // Use the original element count so we don't do more scalar opts than
32210       // necessary.
32211       for (unsigned i=0; i < MinElts; ++i) {
32212         SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
32213                                   DAG.getIntPtrConstant(i, dl));
32214         Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
32215       }
32216       Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
32217       return;
32218     }
32219 
32220     // With AVX512 there are some cases that can use a target specific
32221     // truncate node to go from 256/512 to less than 128 with zeros in the
32222     // upper elements of the 128 bit result.
32223     if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
32224       // We can use VTRUNC directly for 256 bits with VLX or for any 512 bits.
32225       if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
32226         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32227         return;
32228       }
32229       // There's one case we can widen to 512 bits and use VTRUNC.
32230       if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
32231         In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
32232                          DAG.getUNDEF(MVT::v4i64));
32233         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32234         return;
32235       }
32236     }
32237     if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
32238         getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
32239         isTypeLegal(MVT::v4i64)) {
32240       // The input needs to be split and the output needs to be widened. Use
32241       // two VTRUNCs, and shuffle their results together into the wider type.
32242       SDValue Lo, Hi;
32243       std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
32244 
32245       Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
32246       Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
32247       SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
32248                                          { 0,  1,  2,  3, 16, 17, 18, 19,
32249                                           -1, -1, -1, -1, -1, -1, -1, -1 });
32250       Results.push_back(Res);
32251       return;
32252     }
32253 
32254     // Attempt to widen the truncation input vector to let LowerTRUNCATE handle
32255     // this via type legalization.
32256     if ((InEltVT == MVT::i16 || InEltVT == MVT::i32 || InEltVT == MVT::i64) &&
32257         (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32) &&
32258         (!Subtarget.hasSSSE3() ||
32259          (!isTypeLegal(InVT) &&
32260           !(MinElts <= 4 && InEltVT == MVT::i64 && EltVT == MVT::i8)))) {
32261       SDValue WidenIn = widenSubVector(In, false, Subtarget, DAG, dl,
32262                                        InEltVT.getSizeInBits() * WidenNumElts);
32263       Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, WidenVT, WidenIn));
32264       return;
32265     }
32266 
32267     return;
32268   }
32269   case ISD::ANY_EXTEND:
32270     // Right now, only MVT::v8i8 has Custom action for an illegal type.
32271     // It's intended to custom handle the input type.
32272     assert(N->getValueType(0) == MVT::v8i8 &&
32273            "Do not know how to legalize this Node");
32274     return;
32275   case ISD::SIGN_EXTEND:
32276   case ISD::ZERO_EXTEND: {
32277     EVT VT = N->getValueType(0);
32278     SDValue In = N->getOperand(0);
32279     EVT InVT = In.getValueType();
32280     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
32281         (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
32282       assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
32283              "Unexpected type action!");
32284       assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
32285       // Custom split this so we can extend i8/i16->i32 in-vector. This is
32286       // better since sign_extend_inreg i8/i16->i64 requires an extend to i32
32287       // using sra, then an extend from i32 to i64 using pcmpgt. By custom
32288       // splitting, the sra from the extend to i32 can be shared by the split.
32289       In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
32290 
32291       // Fill a vector with sign bits for each element.
32292       SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
32293       SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
32294 
32295       // Create an unpackl and unpackh to interleave the sign bits then bitcast
32296       // to v2i64.
32297       SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32298                                         {0, 4, 1, 5});
32299       Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
32300       SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32301                                         {2, 6, 3, 7});
32302       Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
32303 
32304       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32305       Results.push_back(Res);
32306       return;
32307     }
32308 
32309     if (VT == MVT::v16i32 || VT == MVT::v8i64) {
32310       if (!InVT.is128BitVector()) {
32311         // Not a 128 bit vector, but maybe type legalization will promote
32312         // it to 128 bits.
32313         if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
32314           return;
32315         InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
32316         if (!InVT.is128BitVector())
32317           return;
32318 
32319         // Promote the input to 128 bits. Type legalization will turn this into
32320         // zext_inreg/sext_inreg.
32321         In = DAG.getNode(N->getOpcode(), dl, InVT, In);
32322       }
32323 
32324       // Perform custom splitting instead of the two stage extend we would get
32325       // by default.
32326       EVT LoVT, HiVT;
32327       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
32328       assert(isTypeLegal(LoVT) && "Split VT not legal?");
32329 
32330       SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
32331 
32332       // We need to shift the input over by half the number of elements.
32333       unsigned NumElts = InVT.getVectorNumElements();
32334       unsigned HalfNumElts = NumElts / 2;
32335       SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
32336       for (unsigned i = 0; i != HalfNumElts; ++i)
32337         ShufMask[i] = i + HalfNumElts;
32338 
32339       SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
32340       Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
32341 
32342       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32343       Results.push_back(Res);
32344     }
32345     return;
32346   }
32347   case ISD::FP_TO_SINT:
32348   case ISD::STRICT_FP_TO_SINT:
32349   case ISD::FP_TO_UINT:
32350   case ISD::STRICT_FP_TO_UINT: {
32351     bool IsStrict = N->isStrictFPOpcode();
32352     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
32353                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
32354     EVT VT = N->getValueType(0);
32355     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32356     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32357     EVT SrcVT = Src.getValueType();
32358 
32359     SDValue Res;
32360     if (isSoftF16(SrcVT, Subtarget)) {
32361       EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
32362       if (IsStrict) {
32363         Res =
32364             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
32365                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
32366                                             {NVT, MVT::Other}, {Chain, Src})});
32367         Chain = Res.getValue(1);
32368       } else {
32369         Res = DAG.getNode(N->getOpcode(), dl, VT,
32370                           DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
32371       }
32372       Results.push_back(Res);
32373       if (IsStrict)
32374         Results.push_back(Chain);
32375 
32376       return;
32377     }
32378 
32379     if (VT.isVector() && Subtarget.hasFP16() &&
32380         SrcVT.getVectorElementType() == MVT::f16) {
32381       EVT EleVT = VT.getVectorElementType();
32382       EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
32383 
32384       if (SrcVT != MVT::v8f16) {
32385         SDValue Tmp =
32386             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
32387         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
32388         Ops[0] = Src;
32389         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
32390       }
32391 
32392       if (IsStrict) {
32393         unsigned Opc =
32394             IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32395         Res =
32396             DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
32397         Chain = Res.getValue(1);
32398       } else {
32399         unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32400         Res = DAG.getNode(Opc, dl, ResVT, Src);
32401       }
32402 
32403       // TODO: Need to add exception check code for strict FP.
32404       if (EleVT.getSizeInBits() < 16) {
32405         MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
32406         Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
32407 
32408         // Now widen to 128 bits.
32409         unsigned NumConcats = 128 / TmpVT.getSizeInBits();
32410         MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
32411         SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
32412         ConcatOps[0] = Res;
32413         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32414       }
32415 
32416       Results.push_back(Res);
32417       if (IsStrict)
32418         Results.push_back(Chain);
32419 
32420       return;
32421     }
32422 
32423     if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
32424       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32425              "Unexpected type action!");
32426 
32427       // Try to create a 128 bit vector, but don't exceed a 32 bit element.
32428       unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
32429       MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
32430                                        VT.getVectorNumElements());
32431       SDValue Res;
32432       SDValue Chain;
32433       if (IsStrict) {
32434         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
32435                           {N->getOperand(0), Src});
32436         Chain = Res.getValue(1);
32437       } else
32438         Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
32439 
32440       // Preserve what we know about the size of the original result. If the
32441       // result is v2i32, we have to manually widen the assert.
32442       if (PromoteVT == MVT::v2i32)
32443         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32444                           DAG.getUNDEF(MVT::v2i32));
32445 
32446       Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
32447                         Res.getValueType(), Res,
32448                         DAG.getValueType(VT.getVectorElementType()));
32449 
32450       if (PromoteVT == MVT::v2i32)
32451         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
32452                           DAG.getIntPtrConstant(0, dl));
32453 
32454       // Truncate back to the original width.
32455       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32456 
32457       // Now widen to 128 bits.
32458       unsigned NumConcats = 128 / VT.getSizeInBits();
32459       MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
32460                                       VT.getVectorNumElements() * NumConcats);
32461       SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32462       ConcatOps[0] = Res;
32463       Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32464       Results.push_back(Res);
32465       if (IsStrict)
32466         Results.push_back(Chain);
32467       return;
32468     }
32469 
32470 
32471     if (VT == MVT::v2i32) {
32472       assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
32473              "Strict unsigned conversion requires AVX512");
32474       assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32475       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32476              "Unexpected type action!");
32477       if (Src.getValueType() == MVT::v2f64) {
32478         if (!IsSigned && !Subtarget.hasAVX512()) {
32479           SDValue Res =
32480               expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
32481           Results.push_back(Res);
32482           return;
32483         }
32484 
32485         unsigned Opc;
32486         if (IsStrict)
32487           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32488         else
32489           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32490 
32491         // If we have VLX we can emit a target specific FP_TO_UINT node.
32492         if (!IsSigned && !Subtarget.hasVLX()) {
32493           // Otherwise we can defer to the generic legalizer which will widen
32494           // the input as well. This will be further widened during op
32495           // legalization to v8i32<-v8f64.
32496           // For strict nodes we'll need to widen ourselves.
32497           // FIXME: Fix the type legalizer to safely widen strict nodes?
32498           if (!IsStrict)
32499             return;
32500           Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
32501                             DAG.getConstantFP(0.0, dl, MVT::v2f64));
32502           Opc = N->getOpcode();
32503         }
32504         SDValue Res;
32505         SDValue Chain;
32506         if (IsStrict) {
32507           Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
32508                             {N->getOperand(0), Src});
32509           Chain = Res.getValue(1);
32510         } else {
32511           Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
32512         }
32513         Results.push_back(Res);
32514         if (IsStrict)
32515           Results.push_back(Chain);
32516         return;
32517       }
32518 
32519       // Custom widen strict v2f32->v2i32 by padding with zeros.
32520       // FIXME: Should generic type legalizer do this?
32521       if (Src.getValueType() == MVT::v2f32 && IsStrict) {
32522         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
32523                           DAG.getConstantFP(0.0, dl, MVT::v2f32));
32524         SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
32525                                   {N->getOperand(0), Src});
32526         Results.push_back(Res);
32527         Results.push_back(Res.getValue(1));
32528         return;
32529       }
32530 
32531       // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
32532       // so early out here.
32533       return;
32534     }
32535 
32536     assert(!VT.isVector() && "Vectors should have been handled above!");
32537 
32538     if ((Subtarget.hasDQI() && VT == MVT::i64 &&
32539          (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
32540         (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
32541       assert(!Subtarget.is64Bit() && "i64 should be legal");
32542       unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
32543       // If we use a 128-bit result we might need to use a target specific node.
32544       unsigned SrcElts =
32545           std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
32546       MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
32547       MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
32548       unsigned Opc = N->getOpcode();
32549       if (NumElts != SrcElts) {
32550         if (IsStrict)
32551           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32552         else
32553           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32554       }
32555 
32556       SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
32557       SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
32558                                 DAG.getConstantFP(0.0, dl, VecInVT), Src,
32559                                 ZeroIdx);
32560       SDValue Chain;
32561       if (IsStrict) {
32562         SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
32563         Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
32564         Chain = Res.getValue(1);
32565       } else
32566         Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
32567       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
32568       Results.push_back(Res);
32569       if (IsStrict)
32570         Results.push_back(Chain);
32571       return;
32572     }
32573 
32574     if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
32575       SDValue Chain;
32576       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
32577       Results.push_back(V);
32578       if (IsStrict)
32579         Results.push_back(Chain);
32580       return;
32581     }
32582 
32583     if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
32584       Results.push_back(V);
32585       if (IsStrict)
32586         Results.push_back(Chain);
32587     }
32588     return;
32589   }
32590   case ISD::LRINT:
32591   case ISD::LLRINT: {
32592     if (SDValue V = LRINT_LLRINTHelper(N, DAG))
32593       Results.push_back(V);
32594     return;
32595   }
32596 
32597   case ISD::SINT_TO_FP:
32598   case ISD::STRICT_SINT_TO_FP:
32599   case ISD::UINT_TO_FP:
32600   case ISD::STRICT_UINT_TO_FP: {
32601     bool IsStrict = N->isStrictFPOpcode();
32602     bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
32603                     N->getOpcode() == ISD::STRICT_SINT_TO_FP;
32604     EVT VT = N->getValueType(0);
32605     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32606     if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
32607         Subtarget.hasVLX()) {
32608       if (Src.getValueType().getVectorElementType() == MVT::i16)
32609         return;
32610 
32611       if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
32612         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32613                           IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
32614                                    : DAG.getUNDEF(MVT::v2i32));
32615       if (IsStrict) {
32616         unsigned Opc =
32617             IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
32618         SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
32619                                   {N->getOperand(0), Src});
32620         Results.push_back(Res);
32621         Results.push_back(Res.getValue(1));
32622       } else {
32623         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32624         Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
32625       }
32626       return;
32627     }
32628     if (VT != MVT::v2f32)
32629       return;
32630     EVT SrcVT = Src.getValueType();
32631     if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
32632       if (IsStrict) {
32633         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
32634                                 : X86ISD::STRICT_CVTUI2P;
32635         SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
32636                                   {N->getOperand(0), Src});
32637         Results.push_back(Res);
32638         Results.push_back(Res.getValue(1));
32639       } else {
32640         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32641         Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
32642       }
32643       return;
32644     }
32645     if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
32646         Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
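            // Unsigned v2i64 inputs with the sign bit set cannot use a signed
            // conversion directly, so convert Src/2 (keeping the low bit as a
            // sticky bit so rounding stays correct) and double the result with an
            // FADD; inputs without the sign bit set are converted as-is.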
32647       SDValue Zero = DAG.getConstant(0, dl, SrcVT);
32648       SDValue One  = DAG.getConstant(1, dl, SrcVT);
32649       SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
32650                                  DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
32651                                  DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
32652       SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
32653       SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
32654       SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
32655       for (int i = 0; i != 2; ++i) {
32656         SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
32657                                   SignSrc, DAG.getIntPtrConstant(i, dl));
32658         if (IsStrict)
32659           SignCvts[i] =
32660               DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
32661                           {N->getOperand(0), Elt});
32662         else
32663           SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
32664       };
32665       SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
32666       SDValue Slow, Chain;
32667       if (IsStrict) {
32668         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32669                             SignCvts[0].getValue(1), SignCvts[1].getValue(1));
32670         Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
32671                            {Chain, SignCvt, SignCvt});
32672         Chain = Slow.getValue(1);
32673       } else {
32674         Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
32675       }
32676       IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
32677       IsNeg =
32678           DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
32679       SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
32680       Results.push_back(Cvt);
32681       if (IsStrict)
32682         Results.push_back(Chain);
32683       return;
32684     }
32685 
32686     if (SrcVT != MVT::v2i32)
32687       return;
32688 
32689     if (IsSigned || Subtarget.hasAVX512()) {
32690       if (!IsStrict)
32691         return;
32692 
32693       // Custom widen strict v2i32->v2f32 to avoid scalarization.
32694       // FIXME: Should generic type legalizer do this?
32695       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32696                         DAG.getConstant(0, dl, MVT::v2i32));
32697       SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
32698                                 {N->getOperand(0), Src});
32699       Results.push_back(Res);
32700       Results.push_back(Res.getValue(1));
32701       return;
32702     }
32703 
32704     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
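          // The sequence below uses the classic 2^52 exponent-bias trick: OR the
          // zero-extended 32-bit value into the mantissa of the double constant
          // 2^52 (bit pattern 0x4330000000000000), subtract 2^52 to recover the
          // integer exactly as a double, then round to f32. A rough scalar
          // sketch (illustrative only; U32ToF32 is not a function in this file):
          //   float U32ToF32(uint32_t X) {
          //     uint64_t Bits = 0x4330000000000000ULL | X;  // bits of 2^52 + X
          //     double D;
          //     memcpy(&D, &Bits, sizeof(D));
          //     return (float)(D - 4503599627370496.0);     // subtract 2^52
          //   }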
32705     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
32706     SDValue VBias = DAG.getConstantFP(
32707         llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::v2f64);
32708     SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
32709                              DAG.getBitcast(MVT::v2i64, VBias));
32710     Or = DAG.getBitcast(MVT::v2f64, Or);
32711     if (IsStrict) {
32712       SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
32713                                 {N->getOperand(0), Or, VBias});
32714       SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
32715                                 {MVT::v4f32, MVT::Other},
32716                                 {Sub.getValue(1), Sub});
32717       Results.push_back(Res);
32718       Results.push_back(Res.getValue(1));
32719     } else {
32720       // TODO: Are there any fast-math-flags to propagate here?
32721       SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
32722       Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
32723     }
32724     return;
32725   }
32726   case ISD::STRICT_FP_ROUND:
32727   case ISD::FP_ROUND: {
32728     bool IsStrict = N->isStrictFPOpcode();
32729     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32730     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32731     SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
32732     EVT SrcVT = Src.getValueType();
32733     EVT VT = N->getValueType(0);
32734     SDValue V;
32735     if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
32736       SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
32737                              : DAG.getUNDEF(MVT::v2f32);
32738       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
32739     }
32740     if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
32741       assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
32742       if (SrcVT.getVectorElementType() != MVT::f32)
32743         return;
32744 
32745       if (IsStrict)
32746         V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
32747                         {Chain, Src, Rnd});
32748       else
32749         V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
32750 
32751       Results.push_back(DAG.getBitcast(MVT::v8f16, V));
32752       if (IsStrict)
32753         Results.push_back(V.getValue(1));
32754       return;
32755     }
32756     if (!isTypeLegal(Src.getValueType()))
32757       return;
32758     EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
32759     if (IsStrict)
32760       V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
32761                       {Chain, Src});
32762     else
32763       V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
32764     Results.push_back(V);
32765     if (IsStrict)
32766       Results.push_back(V.getValue(1));
32767     return;
32768   }
32769   case ISD::FP_EXTEND:
32770   case ISD::STRICT_FP_EXTEND: {
32771     // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
32772     // No other ValueType for FP_EXTEND should reach this point.
32773     assert(N->getValueType(0) == MVT::v2f32 &&
32774            "Do not know how to legalize this Node");
32775     if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
32776       return;
32777     bool IsStrict = N->isStrictFPOpcode();
32778     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32779     SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
32780                            : DAG.getUNDEF(MVT::v2f16);
32781     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
32782     if (IsStrict)
32783       V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
32784                       {N->getOperand(0), V});
32785     else
32786       V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
32787     Results.push_back(V);
32788     if (IsStrict)
32789       Results.push_back(V.getValue(1));
32790     return;
32791   }
32792   case ISD::INTRINSIC_W_CHAIN: {
32793     unsigned IntNo = N->getConstantOperandVal(1);
32794     switch (IntNo) {
32795     default : llvm_unreachable("Do not know how to custom type "
32796                                "legalize this intrinsic operation!");
32797     case Intrinsic::x86_rdtsc:
32798       return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
32799                                      Results);
32800     case Intrinsic::x86_rdtscp:
32801       return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
32802                                      Results);
32803     case Intrinsic::x86_rdpmc:
32804       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
32805                                   Results);
32806       return;
32807     case Intrinsic::x86_rdpru:
32808       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
32809         Results);
32810       return;
32811     case Intrinsic::x86_xgetbv:
32812       expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
32813                                   Results);
32814       return;
32815     }
32816   }
32817   case ISD::READCYCLECOUNTER: {
32818     return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
32819   }
32820   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
32821     EVT T = N->getValueType(0);
32822     assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
32823     bool Regs64bit = T == MVT::i128;
32824     assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
32825            "128-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
32826     MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
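          // CMPXCHG8B/CMPXCHG16B compare the memory operand against EDX:EAX
          // (RDX:RAX for the 16-byte form) and, on a match, store ECX:EBX
          // (RCX:RBX) back to memory. The copies below therefore place the
          // expected value in EAX/EDX (RAX/RDX) and the desired value in
          // EBX/ECX (RBX/RCX) before the cmpxchg pseudo is emitted.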
32827     SDValue cpInL, cpInH;
32828     std::tie(cpInL, cpInH) =
32829         DAG.SplitScalar(N->getOperand(2), dl, HalfT, HalfT);
32830     cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
32831                              Regs64bit ? X86::RAX : X86::EAX, cpInL, SDValue());
32832     cpInH =
32833         DAG.getCopyToReg(cpInL.getValue(0), dl, Regs64bit ? X86::RDX : X86::EDX,
32834                          cpInH, cpInL.getValue(1));
32835     SDValue swapInL, swapInH;
32836     std::tie(swapInL, swapInH) =
32837         DAG.SplitScalar(N->getOperand(3), dl, HalfT, HalfT);
32838     swapInH =
32839         DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
32840                          swapInH, cpInH.getValue(1));
32841 
32842     // In 64-bit mode we might need the base pointer in RBX, but we can't know
32843     // until later. So we keep the RBX input in a vreg and use a custom
32844     // inserter.
32845     // Since RBX will be a reserved register, the register allocator will not
32846     // make sure its value is saved and restored around this live range, so
32847     // the custom inserter has to do that itself.
32848     SDValue Result;
32849     SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
32850     MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
32851     if (Regs64bit) {
32852       SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
32853                        swapInH.getValue(1)};
32854       Result =
32855           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
32856     } else {
32857       swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
32858                                  swapInH.getValue(1));
32859       SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
32860                        swapInL.getValue(1)};
32861       Result =
32862           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
32863     }
32864 
32865     SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
32866                                         Regs64bit ? X86::RAX : X86::EAX,
32867                                         HalfT, Result.getValue(1));
32868     SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
32869                                         Regs64bit ? X86::RDX : X86::EDX,
32870                                         HalfT, cpOutL.getValue(2));
32871     SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
32872 
32873     SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
32874                                         MVT::i32, cpOutH.getValue(2));
32875     SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
32876     Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
32877 
32878     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
32879     Results.push_back(Success);
32880     Results.push_back(EFLAGS.getValue(1));
32881     return;
32882   }
32883   case ISD::ATOMIC_LOAD: {
32884     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32885     bool NoImplicitFloatOps =
32886         DAG.getMachineFunction().getFunction().hasFnAttribute(
32887             Attribute::NoImplicitFloat);
32888     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
32889       auto *Node = cast<AtomicSDNode>(N);
32890       if (Subtarget.hasSSE1()) {
32891         // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
32892         // Then extract the lower 64-bits.
32893         MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
32894         SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
32895         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32896         SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
32897                                              MVT::i64, Node->getMemOperand());
32898         if (Subtarget.hasSSE2()) {
32899           SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
32900                                     DAG.getIntPtrConstant(0, dl));
32901           Results.push_back(Res);
32902           Results.push_back(Ld.getValue(1));
32903           return;
32904         }
32905         // We use an alternative sequence for SSE1 that extracts as v2f32 and
32906         // then casts to i64. This avoids a 128-bit stack temporary being
32907         // created by type legalization if we were to cast v4f32->v2i64.
32908         SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
32909                                   DAG.getIntPtrConstant(0, dl));
32910         Res = DAG.getBitcast(MVT::i64, Res);
32911         Results.push_back(Res);
32912         Results.push_back(Ld.getValue(1));
32913         return;
32914       }
32915       if (Subtarget.hasX87()) {
32916         // First load this into an 80-bit X87 register. This will put the whole
32917         // integer into the significand.
32918         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
32919         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32920         SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
32921                                                  dl, Tys, Ops, MVT::i64,
32922                                                  Node->getMemOperand());
32923         SDValue Chain = Result.getValue(1);
32924 
32925         // Now store the X87 register to a stack temporary and convert to i64.
32926         // This store is not atomic and doesn't need to be.
32927         // FIXME: We don't need a stack temporary if the result of the load
32928         // is already being stored. We could just directly store there.
32929         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
32930         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
32931         MachinePointerInfo MPI =
32932             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
32933         SDValue StoreOps[] = { Chain, Result, StackPtr };
32934         Chain = DAG.getMemIntrinsicNode(
32935             X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
32936             MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
32937 
32938         // Finally load the value back from the stack temporary and return it.
32939         // This load is not atomic and doesn't need to be.
32940         // This load will be further type legalized.
32941         Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
32942         Results.push_back(Result);
32943         Results.push_back(Result.getValue(1));
32944         return;
32945       }
32946     }
32947     // TODO: Use MOVLPS when SSE1 is available?
32948     // Delegate to generic TypeLegalization. Situations we can really handle
32949     // should have already been dealt with by AtomicExpandPass.cpp.
32950     break;
32951   }
32952   case ISD::ATOMIC_SWAP:
32953   case ISD::ATOMIC_LOAD_ADD:
32954   case ISD::ATOMIC_LOAD_SUB:
32955   case ISD::ATOMIC_LOAD_AND:
32956   case ISD::ATOMIC_LOAD_OR:
32957   case ISD::ATOMIC_LOAD_XOR:
32958   case ISD::ATOMIC_LOAD_NAND:
32959   case ISD::ATOMIC_LOAD_MIN:
32960   case ISD::ATOMIC_LOAD_MAX:
32961   case ISD::ATOMIC_LOAD_UMIN:
32962   case ISD::ATOMIC_LOAD_UMAX:
32963     // Delegate to generic TypeLegalization. Situations we can really handle
32964     // should have already been dealt with by AtomicExpandPass.cpp.
32965     break;
32966 
32967   case ISD::BITCAST: {
32968     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32969     EVT DstVT = N->getValueType(0);
32970     EVT SrcVT = N->getOperand(0).getValueType();
32971 
32972     // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit
32973     // target, we can split using the k-register rather than memory.
32974     if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
32975       assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
32976       SDValue Lo, Hi;
32977       std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
32978       Lo = DAG.getBitcast(MVT::i32, Lo);
32979       Hi = DAG.getBitcast(MVT::i32, Hi);
32980       SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
32981       Results.push_back(Res);
32982       return;
32983     }
32984 
32985     if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
32986       // FIXME: Use v4f32 for SSE1?
32987       assert(Subtarget.hasSSE2() && "Requires SSE2");
32988       assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
32989              "Unexpected type action!");
32990       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
32991       SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
32992                                 N->getOperand(0));
32993       Res = DAG.getBitcast(WideVT, Res);
32994       Results.push_back(Res);
32995       return;
32996     }
32997 
32998     return;
32999   }
33000   case ISD::MGATHER: {
33001     EVT VT = N->getValueType(0);
33002     if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
33003         (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
33004       auto *Gather = cast<MaskedGatherSDNode>(N);
33005       SDValue Index = Gather->getIndex();
33006       if (Index.getValueType() != MVT::v2i64)
33007         return;
33008       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33009              "Unexpected type action!");
33010       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33011       SDValue Mask = Gather->getMask();
33012       assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
33013       SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
33014                                      Gather->getPassThru(),
33015                                      DAG.getUNDEF(VT));
33016       if (!Subtarget.hasVLX()) {
33017         // We need to widen the mask, but the instruction will only use 2
33018         // of its elements. So we can use undef.
33019         Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
33020                            DAG.getUNDEF(MVT::v2i1));
33021         Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
33022       }
33023       SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
33024                         Gather->getBasePtr(), Index, Gather->getScale() };
33025       SDValue Res = DAG.getMemIntrinsicNode(
33026           X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
33027           Gather->getMemoryVT(), Gather->getMemOperand());
33028       Results.push_back(Res);
33029       Results.push_back(Res.getValue(1));
33030       return;
33031     }
33032     return;
33033   }
33034   case ISD::LOAD: {
33035     // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
33036     // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
33037     // cast since type legalization will try to use an i64 load.
33038     MVT VT = N->getSimpleValueType(0);
33039     assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
33040     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33041            "Unexpected type action!");
33042     if (!ISD::isNON_EXTLoad(N))
33043       return;
33044     auto *Ld = cast<LoadSDNode>(N);
33045     if (Subtarget.hasSSE2()) {
33046       MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
33047       SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
33048                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
33049                                 Ld->getMemOperand()->getFlags());
33050       SDValue Chain = Res.getValue(1);
33051       MVT VecVT = MVT::getVectorVT(LdVT, 2);
33052       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
33053       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33054       Res = DAG.getBitcast(WideVT, Res);
33055       Results.push_back(Res);
33056       Results.push_back(Chain);
33057       return;
33058     }
33059     assert(Subtarget.hasSSE1() && "Expected SSE");
33060     SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
33061     SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
33062     SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33063                                           MVT::i64, Ld->getMemOperand());
33064     Results.push_back(Res);
33065     Results.push_back(Res.getValue(1));
33066     return;
33067   }
33068   case ISD::ADDRSPACECAST: {
33069     SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
33070     Results.push_back(V);
33071     return;
33072   }
33073   case ISD::BITREVERSE: {
33074     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33075     assert(Subtarget.hasXOP() && "Expected XOP");
33076     // We can use VPPERM by copying to a vector register and back. We'll need
33077     // to move the scalar in two i32 pieces.
33078     Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
33079     return;
33080   }
33081   case ISD::EXTRACT_VECTOR_ELT: {
33082     // f16 = extract vXf16 %vec, i64 %idx
33083     assert(N->getSimpleValueType(0) == MVT::f16 &&
33084            "Unexpected Value type of EXTRACT_VECTOR_ELT!");
33085     assert(Subtarget.hasFP16() && "Expected FP16");
33086     SDValue VecOp = N->getOperand(0);
33087     EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
33088     SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
33089     Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
33090                         N->getOperand(1));
33091     Split = DAG.getBitcast(MVT::f16, Split);
33092     Results.push_back(Split);
33093     return;
33094   }
33095   }
33096 }
33097 
33098 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
33099   switch ((X86ISD::NodeType)Opcode) {
33100   case X86ISD::FIRST_NUMBER:       break;
33101 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
33102   NODE_NAME_CASE(BSF)
33103   NODE_NAME_CASE(BSR)
33104   NODE_NAME_CASE(FSHL)
33105   NODE_NAME_CASE(FSHR)
33106   NODE_NAME_CASE(FAND)
33107   NODE_NAME_CASE(FANDN)
33108   NODE_NAME_CASE(FOR)
33109   NODE_NAME_CASE(FXOR)
33110   NODE_NAME_CASE(FILD)
33111   NODE_NAME_CASE(FIST)
33112   NODE_NAME_CASE(FP_TO_INT_IN_MEM)
33113   NODE_NAME_CASE(FLD)
33114   NODE_NAME_CASE(FST)
33115   NODE_NAME_CASE(CALL)
33116   NODE_NAME_CASE(CALL_RVMARKER)
33117   NODE_NAME_CASE(BT)
33118   NODE_NAME_CASE(CMP)
33119   NODE_NAME_CASE(FCMP)
33120   NODE_NAME_CASE(STRICT_FCMP)
33121   NODE_NAME_CASE(STRICT_FCMPS)
33122   NODE_NAME_CASE(COMI)
33123   NODE_NAME_CASE(UCOMI)
33124   NODE_NAME_CASE(CMPM)
33125   NODE_NAME_CASE(CMPMM)
33126   NODE_NAME_CASE(STRICT_CMPM)
33127   NODE_NAME_CASE(CMPMM_SAE)
33128   NODE_NAME_CASE(SETCC)
33129   NODE_NAME_CASE(SETCC_CARRY)
33130   NODE_NAME_CASE(FSETCC)
33131   NODE_NAME_CASE(FSETCCM)
33132   NODE_NAME_CASE(FSETCCM_SAE)
33133   NODE_NAME_CASE(CMOV)
33134   NODE_NAME_CASE(BRCOND)
33135   NODE_NAME_CASE(RET_GLUE)
33136   NODE_NAME_CASE(IRET)
33137   NODE_NAME_CASE(REP_STOS)
33138   NODE_NAME_CASE(REP_MOVS)
33139   NODE_NAME_CASE(GlobalBaseReg)
33140   NODE_NAME_CASE(Wrapper)
33141   NODE_NAME_CASE(WrapperRIP)
33142   NODE_NAME_CASE(MOVQ2DQ)
33143   NODE_NAME_CASE(MOVDQ2Q)
33144   NODE_NAME_CASE(MMX_MOVD2W)
33145   NODE_NAME_CASE(MMX_MOVW2D)
33146   NODE_NAME_CASE(PEXTRB)
33147   NODE_NAME_CASE(PEXTRW)
33148   NODE_NAME_CASE(INSERTPS)
33149   NODE_NAME_CASE(PINSRB)
33150   NODE_NAME_CASE(PINSRW)
33151   NODE_NAME_CASE(PSHUFB)
33152   NODE_NAME_CASE(ANDNP)
33153   NODE_NAME_CASE(BLENDI)
33154   NODE_NAME_CASE(BLENDV)
33155   NODE_NAME_CASE(HADD)
33156   NODE_NAME_CASE(HSUB)
33157   NODE_NAME_CASE(FHADD)
33158   NODE_NAME_CASE(FHSUB)
33159   NODE_NAME_CASE(CONFLICT)
33160   NODE_NAME_CASE(FMAX)
33161   NODE_NAME_CASE(FMAXS)
33162   NODE_NAME_CASE(FMAX_SAE)
33163   NODE_NAME_CASE(FMAXS_SAE)
33164   NODE_NAME_CASE(FMIN)
33165   NODE_NAME_CASE(FMINS)
33166   NODE_NAME_CASE(FMIN_SAE)
33167   NODE_NAME_CASE(FMINS_SAE)
33168   NODE_NAME_CASE(FMAXC)
33169   NODE_NAME_CASE(FMINC)
33170   NODE_NAME_CASE(FRSQRT)
33171   NODE_NAME_CASE(FRCP)
33172   NODE_NAME_CASE(EXTRQI)
33173   NODE_NAME_CASE(INSERTQI)
33174   NODE_NAME_CASE(TLSADDR)
33175   NODE_NAME_CASE(TLSBASEADDR)
33176   NODE_NAME_CASE(TLSCALL)
33177   NODE_NAME_CASE(EH_SJLJ_SETJMP)
33178   NODE_NAME_CASE(EH_SJLJ_LONGJMP)
33179   NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
33180   NODE_NAME_CASE(EH_RETURN)
33181   NODE_NAME_CASE(TC_RETURN)
33182   NODE_NAME_CASE(FNSTCW16m)
33183   NODE_NAME_CASE(FLDCW16m)
33184   NODE_NAME_CASE(FNSTENVm)
33185   NODE_NAME_CASE(FLDENVm)
33186   NODE_NAME_CASE(LCMPXCHG_DAG)
33187   NODE_NAME_CASE(LCMPXCHG8_DAG)
33188   NODE_NAME_CASE(LCMPXCHG16_DAG)
33189   NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
33190   NODE_NAME_CASE(LADD)
33191   NODE_NAME_CASE(LSUB)
33192   NODE_NAME_CASE(LOR)
33193   NODE_NAME_CASE(LXOR)
33194   NODE_NAME_CASE(LAND)
33195   NODE_NAME_CASE(LBTS)
33196   NODE_NAME_CASE(LBTC)
33197   NODE_NAME_CASE(LBTR)
33198   NODE_NAME_CASE(LBTS_RM)
33199   NODE_NAME_CASE(LBTC_RM)
33200   NODE_NAME_CASE(LBTR_RM)
33201   NODE_NAME_CASE(AADD)
33202   NODE_NAME_CASE(AOR)
33203   NODE_NAME_CASE(AXOR)
33204   NODE_NAME_CASE(AAND)
33205   NODE_NAME_CASE(VZEXT_MOVL)
33206   NODE_NAME_CASE(VZEXT_LOAD)
33207   NODE_NAME_CASE(VEXTRACT_STORE)
33208   NODE_NAME_CASE(VTRUNC)
33209   NODE_NAME_CASE(VTRUNCS)
33210   NODE_NAME_CASE(VTRUNCUS)
33211   NODE_NAME_CASE(VMTRUNC)
33212   NODE_NAME_CASE(VMTRUNCS)
33213   NODE_NAME_CASE(VMTRUNCUS)
33214   NODE_NAME_CASE(VTRUNCSTORES)
33215   NODE_NAME_CASE(VTRUNCSTOREUS)
33216   NODE_NAME_CASE(VMTRUNCSTORES)
33217   NODE_NAME_CASE(VMTRUNCSTOREUS)
33218   NODE_NAME_CASE(VFPEXT)
33219   NODE_NAME_CASE(STRICT_VFPEXT)
33220   NODE_NAME_CASE(VFPEXT_SAE)
33221   NODE_NAME_CASE(VFPEXTS)
33222   NODE_NAME_CASE(VFPEXTS_SAE)
33223   NODE_NAME_CASE(VFPROUND)
33224   NODE_NAME_CASE(STRICT_VFPROUND)
33225   NODE_NAME_CASE(VMFPROUND)
33226   NODE_NAME_CASE(VFPROUND_RND)
33227   NODE_NAME_CASE(VFPROUNDS)
33228   NODE_NAME_CASE(VFPROUNDS_RND)
33229   NODE_NAME_CASE(VSHLDQ)
33230   NODE_NAME_CASE(VSRLDQ)
33231   NODE_NAME_CASE(VSHL)
33232   NODE_NAME_CASE(VSRL)
33233   NODE_NAME_CASE(VSRA)
33234   NODE_NAME_CASE(VSHLI)
33235   NODE_NAME_CASE(VSRLI)
33236   NODE_NAME_CASE(VSRAI)
33237   NODE_NAME_CASE(VSHLV)
33238   NODE_NAME_CASE(VSRLV)
33239   NODE_NAME_CASE(VSRAV)
33240   NODE_NAME_CASE(VROTLI)
33241   NODE_NAME_CASE(VROTRI)
33242   NODE_NAME_CASE(VPPERM)
33243   NODE_NAME_CASE(CMPP)
33244   NODE_NAME_CASE(STRICT_CMPP)
33245   NODE_NAME_CASE(PCMPEQ)
33246   NODE_NAME_CASE(PCMPGT)
33247   NODE_NAME_CASE(PHMINPOS)
33248   NODE_NAME_CASE(ADD)
33249   NODE_NAME_CASE(SUB)
33250   NODE_NAME_CASE(ADC)
33251   NODE_NAME_CASE(SBB)
33252   NODE_NAME_CASE(SMUL)
33253   NODE_NAME_CASE(UMUL)
33254   NODE_NAME_CASE(OR)
33255   NODE_NAME_CASE(XOR)
33256   NODE_NAME_CASE(AND)
33257   NODE_NAME_CASE(BEXTR)
33258   NODE_NAME_CASE(BEXTRI)
33259   NODE_NAME_CASE(BZHI)
33260   NODE_NAME_CASE(PDEP)
33261   NODE_NAME_CASE(PEXT)
33262   NODE_NAME_CASE(MUL_IMM)
33263   NODE_NAME_CASE(MOVMSK)
33264   NODE_NAME_CASE(PTEST)
33265   NODE_NAME_CASE(TESTP)
33266   NODE_NAME_CASE(KORTEST)
33267   NODE_NAME_CASE(KTEST)
33268   NODE_NAME_CASE(KADD)
33269   NODE_NAME_CASE(KSHIFTL)
33270   NODE_NAME_CASE(KSHIFTR)
33271   NODE_NAME_CASE(PACKSS)
33272   NODE_NAME_CASE(PACKUS)
33273   NODE_NAME_CASE(PALIGNR)
33274   NODE_NAME_CASE(VALIGN)
33275   NODE_NAME_CASE(VSHLD)
33276   NODE_NAME_CASE(VSHRD)
33277   NODE_NAME_CASE(VSHLDV)
33278   NODE_NAME_CASE(VSHRDV)
33279   NODE_NAME_CASE(PSHUFD)
33280   NODE_NAME_CASE(PSHUFHW)
33281   NODE_NAME_CASE(PSHUFLW)
33282   NODE_NAME_CASE(SHUFP)
33283   NODE_NAME_CASE(SHUF128)
33284   NODE_NAME_CASE(MOVLHPS)
33285   NODE_NAME_CASE(MOVHLPS)
33286   NODE_NAME_CASE(MOVDDUP)
33287   NODE_NAME_CASE(MOVSHDUP)
33288   NODE_NAME_CASE(MOVSLDUP)
33289   NODE_NAME_CASE(MOVSD)
33290   NODE_NAME_CASE(MOVSS)
33291   NODE_NAME_CASE(MOVSH)
33292   NODE_NAME_CASE(UNPCKL)
33293   NODE_NAME_CASE(UNPCKH)
33294   NODE_NAME_CASE(VBROADCAST)
33295   NODE_NAME_CASE(VBROADCAST_LOAD)
33296   NODE_NAME_CASE(VBROADCASTM)
33297   NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
33298   NODE_NAME_CASE(VPERMILPV)
33299   NODE_NAME_CASE(VPERMILPI)
33300   NODE_NAME_CASE(VPERM2X128)
33301   NODE_NAME_CASE(VPERMV)
33302   NODE_NAME_CASE(VPERMV3)
33303   NODE_NAME_CASE(VPERMI)
33304   NODE_NAME_CASE(VPTERNLOG)
33305   NODE_NAME_CASE(VFIXUPIMM)
33306   NODE_NAME_CASE(VFIXUPIMM_SAE)
33307   NODE_NAME_CASE(VFIXUPIMMS)
33308   NODE_NAME_CASE(VFIXUPIMMS_SAE)
33309   NODE_NAME_CASE(VRANGE)
33310   NODE_NAME_CASE(VRANGE_SAE)
33311   NODE_NAME_CASE(VRANGES)
33312   NODE_NAME_CASE(VRANGES_SAE)
33313   NODE_NAME_CASE(PMULUDQ)
33314   NODE_NAME_CASE(PMULDQ)
33315   NODE_NAME_CASE(PSADBW)
33316   NODE_NAME_CASE(DBPSADBW)
33317   NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
33318   NODE_NAME_CASE(VAARG_64)
33319   NODE_NAME_CASE(VAARG_X32)
33320   NODE_NAME_CASE(DYN_ALLOCA)
33321   NODE_NAME_CASE(MFENCE)
33322   NODE_NAME_CASE(SEG_ALLOCA)
33323   NODE_NAME_CASE(PROBED_ALLOCA)
33324   NODE_NAME_CASE(RDRAND)
33325   NODE_NAME_CASE(RDSEED)
33326   NODE_NAME_CASE(RDPKRU)
33327   NODE_NAME_CASE(WRPKRU)
33328   NODE_NAME_CASE(VPMADDUBSW)
33329   NODE_NAME_CASE(VPMADDWD)
33330   NODE_NAME_CASE(VPSHA)
33331   NODE_NAME_CASE(VPSHL)
33332   NODE_NAME_CASE(VPCOM)
33333   NODE_NAME_CASE(VPCOMU)
33334   NODE_NAME_CASE(VPERMIL2)
33335   NODE_NAME_CASE(FMSUB)
33336   NODE_NAME_CASE(STRICT_FMSUB)
33337   NODE_NAME_CASE(FNMADD)
33338   NODE_NAME_CASE(STRICT_FNMADD)
33339   NODE_NAME_CASE(FNMSUB)
33340   NODE_NAME_CASE(STRICT_FNMSUB)
33341   NODE_NAME_CASE(FMADDSUB)
33342   NODE_NAME_CASE(FMSUBADD)
33343   NODE_NAME_CASE(FMADD_RND)
33344   NODE_NAME_CASE(FNMADD_RND)
33345   NODE_NAME_CASE(FMSUB_RND)
33346   NODE_NAME_CASE(FNMSUB_RND)
33347   NODE_NAME_CASE(FMADDSUB_RND)
33348   NODE_NAME_CASE(FMSUBADD_RND)
33349   NODE_NAME_CASE(VFMADDC)
33350   NODE_NAME_CASE(VFMADDC_RND)
33351   NODE_NAME_CASE(VFCMADDC)
33352   NODE_NAME_CASE(VFCMADDC_RND)
33353   NODE_NAME_CASE(VFMULC)
33354   NODE_NAME_CASE(VFMULC_RND)
33355   NODE_NAME_CASE(VFCMULC)
33356   NODE_NAME_CASE(VFCMULC_RND)
33357   NODE_NAME_CASE(VFMULCSH)
33358   NODE_NAME_CASE(VFMULCSH_RND)
33359   NODE_NAME_CASE(VFCMULCSH)
33360   NODE_NAME_CASE(VFCMULCSH_RND)
33361   NODE_NAME_CASE(VFMADDCSH)
33362   NODE_NAME_CASE(VFMADDCSH_RND)
33363   NODE_NAME_CASE(VFCMADDCSH)
33364   NODE_NAME_CASE(VFCMADDCSH_RND)
33365   NODE_NAME_CASE(VPMADD52H)
33366   NODE_NAME_CASE(VPMADD52L)
33367   NODE_NAME_CASE(VRNDSCALE)
33368   NODE_NAME_CASE(STRICT_VRNDSCALE)
33369   NODE_NAME_CASE(VRNDSCALE_SAE)
33370   NODE_NAME_CASE(VRNDSCALES)
33371   NODE_NAME_CASE(VRNDSCALES_SAE)
33372   NODE_NAME_CASE(VREDUCE)
33373   NODE_NAME_CASE(VREDUCE_SAE)
33374   NODE_NAME_CASE(VREDUCES)
33375   NODE_NAME_CASE(VREDUCES_SAE)
33376   NODE_NAME_CASE(VGETMANT)
33377   NODE_NAME_CASE(VGETMANT_SAE)
33378   NODE_NAME_CASE(VGETMANTS)
33379   NODE_NAME_CASE(VGETMANTS_SAE)
33380   NODE_NAME_CASE(PCMPESTR)
33381   NODE_NAME_CASE(PCMPISTR)
33382   NODE_NAME_CASE(XTEST)
33383   NODE_NAME_CASE(COMPRESS)
33384   NODE_NAME_CASE(EXPAND)
33385   NODE_NAME_CASE(SELECTS)
33386   NODE_NAME_CASE(ADDSUB)
33387   NODE_NAME_CASE(RCP14)
33388   NODE_NAME_CASE(RCP14S)
33389   NODE_NAME_CASE(RCP28)
33390   NODE_NAME_CASE(RCP28_SAE)
33391   NODE_NAME_CASE(RCP28S)
33392   NODE_NAME_CASE(RCP28S_SAE)
33393   NODE_NAME_CASE(EXP2)
33394   NODE_NAME_CASE(EXP2_SAE)
33395   NODE_NAME_CASE(RSQRT14)
33396   NODE_NAME_CASE(RSQRT14S)
33397   NODE_NAME_CASE(RSQRT28)
33398   NODE_NAME_CASE(RSQRT28_SAE)
33399   NODE_NAME_CASE(RSQRT28S)
33400   NODE_NAME_CASE(RSQRT28S_SAE)
33401   NODE_NAME_CASE(FADD_RND)
33402   NODE_NAME_CASE(FADDS)
33403   NODE_NAME_CASE(FADDS_RND)
33404   NODE_NAME_CASE(FSUB_RND)
33405   NODE_NAME_CASE(FSUBS)
33406   NODE_NAME_CASE(FSUBS_RND)
33407   NODE_NAME_CASE(FMUL_RND)
33408   NODE_NAME_CASE(FMULS)
33409   NODE_NAME_CASE(FMULS_RND)
33410   NODE_NAME_CASE(FDIV_RND)
33411   NODE_NAME_CASE(FDIVS)
33412   NODE_NAME_CASE(FDIVS_RND)
33413   NODE_NAME_CASE(FSQRT_RND)
33414   NODE_NAME_CASE(FSQRTS)
33415   NODE_NAME_CASE(FSQRTS_RND)
33416   NODE_NAME_CASE(FGETEXP)
33417   NODE_NAME_CASE(FGETEXP_SAE)
33418   NODE_NAME_CASE(FGETEXPS)
33419   NODE_NAME_CASE(FGETEXPS_SAE)
33420   NODE_NAME_CASE(SCALEF)
33421   NODE_NAME_CASE(SCALEF_RND)
33422   NODE_NAME_CASE(SCALEFS)
33423   NODE_NAME_CASE(SCALEFS_RND)
33424   NODE_NAME_CASE(MULHRS)
33425   NODE_NAME_CASE(SINT_TO_FP_RND)
33426   NODE_NAME_CASE(UINT_TO_FP_RND)
33427   NODE_NAME_CASE(CVTTP2SI)
33428   NODE_NAME_CASE(CVTTP2UI)
33429   NODE_NAME_CASE(STRICT_CVTTP2SI)
33430   NODE_NAME_CASE(STRICT_CVTTP2UI)
33431   NODE_NAME_CASE(MCVTTP2SI)
33432   NODE_NAME_CASE(MCVTTP2UI)
33433   NODE_NAME_CASE(CVTTP2SI_SAE)
33434   NODE_NAME_CASE(CVTTP2UI_SAE)
33435   NODE_NAME_CASE(CVTTS2SI)
33436   NODE_NAME_CASE(CVTTS2UI)
33437   NODE_NAME_CASE(CVTTS2SI_SAE)
33438   NODE_NAME_CASE(CVTTS2UI_SAE)
33439   NODE_NAME_CASE(CVTSI2P)
33440   NODE_NAME_CASE(CVTUI2P)
33441   NODE_NAME_CASE(STRICT_CVTSI2P)
33442   NODE_NAME_CASE(STRICT_CVTUI2P)
33443   NODE_NAME_CASE(MCVTSI2P)
33444   NODE_NAME_CASE(MCVTUI2P)
33445   NODE_NAME_CASE(VFPCLASS)
33446   NODE_NAME_CASE(VFPCLASSS)
33447   NODE_NAME_CASE(MULTISHIFT)
33448   NODE_NAME_CASE(SCALAR_SINT_TO_FP)
33449   NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
33450   NODE_NAME_CASE(SCALAR_UINT_TO_FP)
33451   NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
33452   NODE_NAME_CASE(CVTPS2PH)
33453   NODE_NAME_CASE(STRICT_CVTPS2PH)
33454   NODE_NAME_CASE(CVTPS2PH_SAE)
33455   NODE_NAME_CASE(MCVTPS2PH)
33456   NODE_NAME_CASE(MCVTPS2PH_SAE)
33457   NODE_NAME_CASE(CVTPH2PS)
33458   NODE_NAME_CASE(STRICT_CVTPH2PS)
33459   NODE_NAME_CASE(CVTPH2PS_SAE)
33460   NODE_NAME_CASE(CVTP2SI)
33461   NODE_NAME_CASE(CVTP2UI)
33462   NODE_NAME_CASE(MCVTP2SI)
33463   NODE_NAME_CASE(MCVTP2UI)
33464   NODE_NAME_CASE(CVTP2SI_RND)
33465   NODE_NAME_CASE(CVTP2UI_RND)
33466   NODE_NAME_CASE(CVTS2SI)
33467   NODE_NAME_CASE(CVTS2UI)
33468   NODE_NAME_CASE(CVTS2SI_RND)
33469   NODE_NAME_CASE(CVTS2UI_RND)
33470   NODE_NAME_CASE(CVTNE2PS2BF16)
33471   NODE_NAME_CASE(CVTNEPS2BF16)
33472   NODE_NAME_CASE(MCVTNEPS2BF16)
33473   NODE_NAME_CASE(DPBF16PS)
33474   NODE_NAME_CASE(LWPINS)
33475   NODE_NAME_CASE(MGATHER)
33476   NODE_NAME_CASE(MSCATTER)
33477   NODE_NAME_CASE(VPDPBUSD)
33478   NODE_NAME_CASE(VPDPBUSDS)
33479   NODE_NAME_CASE(VPDPWSSD)
33480   NODE_NAME_CASE(VPDPWSSDS)
33481   NODE_NAME_CASE(VPSHUFBITQMB)
33482   NODE_NAME_CASE(GF2P8MULB)
33483   NODE_NAME_CASE(GF2P8AFFINEQB)
33484   NODE_NAME_CASE(GF2P8AFFINEINVQB)
33485   NODE_NAME_CASE(NT_CALL)
33486   NODE_NAME_CASE(NT_BRIND)
33487   NODE_NAME_CASE(UMWAIT)
33488   NODE_NAME_CASE(TPAUSE)
33489   NODE_NAME_CASE(ENQCMD)
33490   NODE_NAME_CASE(ENQCMDS)
33491   NODE_NAME_CASE(VP2INTERSECT)
33492   NODE_NAME_CASE(VPDPBSUD)
33493   NODE_NAME_CASE(VPDPBSUDS)
33494   NODE_NAME_CASE(VPDPBUUD)
33495   NODE_NAME_CASE(VPDPBUUDS)
33496   NODE_NAME_CASE(VPDPBSSD)
33497   NODE_NAME_CASE(VPDPBSSDS)
33498   NODE_NAME_CASE(AESENC128KL)
33499   NODE_NAME_CASE(AESDEC128KL)
33500   NODE_NAME_CASE(AESENC256KL)
33501   NODE_NAME_CASE(AESDEC256KL)
33502   NODE_NAME_CASE(AESENCWIDE128KL)
33503   NODE_NAME_CASE(AESDECWIDE128KL)
33504   NODE_NAME_CASE(AESENCWIDE256KL)
33505   NODE_NAME_CASE(AESDECWIDE256KL)
33506   NODE_NAME_CASE(CMPCCXADD)
33507   NODE_NAME_CASE(TESTUI)
33508   NODE_NAME_CASE(FP80_ADD)
33509   NODE_NAME_CASE(STRICT_FP80_ADD)
33510   }
33511   return nullptr;
33512 #undef NODE_NAME_CASE
33513 }
33514 
33515 /// Return true if the addressing mode represented by AM is legal for this
33516 /// target, for a load/store of the specified type.
33517 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
33518                                               const AddrMode &AM, Type *Ty,
33519                                               unsigned AS,
33520                                               Instruction *I) const {
33521   // X86 supports extremely general addressing modes.
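        // i.e. everything of the form BaseGV + BaseReg + Scale*IndexReg + Disp;
        // for instance "movl 16(%rdi,%rcx,4), %eax" uses BaseReg=%rdi, Scale=4,
        // IndexReg=%rcx and a displacement of 16.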
33522   CodeModel::Model M = getTargetMachine().getCodeModel();
33523 
33524   // X86 allows a sign-extended 32-bit immediate field as a displacement.
33525   if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
33526     return false;
33527 
33528   if (AM.BaseGV) {
33529     unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
33530 
33531     // If a reference to this global requires an extra load, we can't fold it.
33532     if (isGlobalStubReference(GVFlags))
33533       return false;
33534 
33535     // If BaseGV requires a register for the PIC base, we cannot also have a
33536     // BaseReg specified.
33537     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
33538       return false;
33539 
33540     // If lower 4G is not available, then we must use rip-relative addressing.
33541     if ((M != CodeModel::Small || isPositionIndependent()) &&
33542         Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
33543       return false;
33544   }
33545 
33546   switch (AM.Scale) {
33547   case 0:
33548   case 1:
33549   case 2:
33550   case 4:
33551   case 8:
33552     // These scales always work.
33553     break;
33554   case 3:
33555   case 5:
33556   case 9:
33557     // These scales are formed with basereg+scalereg.  Only accept if there is
33558     // no basereg yet.
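          // e.g. Scale == 3 can only be matched as "lea (%rax,%rax,2), %rcx"
          // (%rcx = %rax + 2*%rax), which already consumes the base-register
          // slot for the extra %rax, so a separate base register cannot also
          // be folded in.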
33559     if (AM.HasBaseReg)
33560       return false;
33561     break;
33562   default:  // Other stuff never works.
33563     return false;
33564   }
33565 
33566   return true;
33567 }
33568 
33569 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
33570   unsigned Bits = Ty->getScalarSizeInBits();
33571 
33572   // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
33573   // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
33574   if (Subtarget.hasXOP() &&
33575       (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
33576     return false;
33577 
33578   // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
33579   // shifts just as cheap as scalar ones.
33580   if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
33581     return false;
33582 
33583   // AVX512BW has shifts such as vpsllvw.
33584   if (Subtarget.hasBWI() && Bits == 16)
33585     return false;
33586 
33587   // Otherwise, it's significantly cheaper to shift by a scalar amount than by
33588   // a fully general vector of shift amounts.
33589   return true;
33590 }
33591 
33592 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
33593   switch (Opcode) {
33594   // These are non-commutative binops.
33595   // TODO: Add more X86ISD opcodes once we have test coverage.
33596   case X86ISD::ANDNP:
33597   case X86ISD::PCMPGT:
33598   case X86ISD::FMAX:
33599   case X86ISD::FMIN:
33600   case X86ISD::FANDN:
33601   case X86ISD::VPSHA:
33602   case X86ISD::VPSHL:
33603   case X86ISD::VSHLV:
33604   case X86ISD::VSRLV:
33605   case X86ISD::VSRAV:
33606     return true;
33607   }
33608 
33609   return TargetLoweringBase::isBinOp(Opcode);
33610 }
33611 
33612 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
33613   switch (Opcode) {
33614   // TODO: Add more X86ISD opcodes once we have test coverage.
33615   case X86ISD::PCMPEQ:
33616   case X86ISD::PMULDQ:
33617   case X86ISD::PMULUDQ:
33618   case X86ISD::FMAXC:
33619   case X86ISD::FMINC:
33620   case X86ISD::FAND:
33621   case X86ISD::FOR:
33622   case X86ISD::FXOR:
33623     return true;
33624   }
33625 
33626   return TargetLoweringBase::isCommutativeBinOp(Opcode);
33627 }
33628 
33629 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
33630   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33631     return false;
33632   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
33633   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
33634   return NumBits1 > NumBits2;
33635 }
33636 
33637 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
33638   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33639     return false;
33640 
33641   if (!isTypeLegal(EVT::getEVT(Ty1)))
33642     return false;
33643 
33644   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
33645 
33646   // Assuming the caller doesn't have a zeroext or signext return parameter,
33647   // truncation all the way down to i1 is valid.
33648   return true;
33649 }
33650 
33651 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
33652   return isInt<32>(Imm);
33653 }
33654 
33655 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
33656   // Can also use sub to handle negated immediates.
33657   return isInt<32>(Imm);
33658 }
33659 
33660 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
33661   return isInt<32>(Imm);
33662 }
33663 
33664 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
33665   if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
33666     return false;
33667   unsigned NumBits1 = VT1.getSizeInBits();
33668   unsigned NumBits2 = VT2.getSizeInBits();
33669   return NumBits1 > NumBits2;
33670 }
33671 
33672 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
33673   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
33674   return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
33675 }
33676 
33677 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
33678   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
33679   return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
33680 }
33681 
33682 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
33683   EVT VT1 = Val.getValueType();
33684   if (isZExtFree(VT1, VT2))
33685     return true;
33686 
33687   if (Val.getOpcode() != ISD::LOAD)
33688     return false;
33689 
33690   if (!VT1.isSimple() || !VT1.isInteger() ||
33691       !VT2.isSimple() || !VT2.isInteger())
33692     return false;
33693 
33694   switch (VT1.getSimpleVT().SimpleTy) {
33695   default: break;
33696   case MVT::i8:
33697   case MVT::i16:
33698   case MVT::i32:
33699     // X86 has 8, 16, and 32-bit zero-extending loads.
33700     return true;
33701   }
33702 
33703   return false;
33704 }
33705 
33706 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
33707                                            SmallVectorImpl<Use *> &Ops) const {
33708   using namespace llvm::PatternMatch;
33709 
33710   FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
33711   if (!VTy)
33712     return false;
33713 
33714   if (I->getOpcode() == Instruction::Mul &&
33715       VTy->getElementType()->isIntegerTy(64)) {
33716     for (auto &Op : I->operands()) {
33717       // Make sure we are not already sinking this operand
33718       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
33719         continue;
33720 
33721       // Look for the PMULDQ pattern where the input is a sext_inreg from vXi32,
33722       // or the PMULUDQ pattern where the input is a zext_inreg from vXi32.
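            // Roughly, in IR these look like (illustrative only):
            //   %sh = shl <2 x i64> %x, <i64 32, i64 32>
            //   %se = ashr <2 x i64> %sh, <i64 32, i64 32>              ; sext_inreg
            //   %ze = and <2 x i64> %x, <i64 4294967295, i64 4294967295> ; zext_inreg
            // Sinking these next to the i64 multiply lets instruction selection
            // form PMULDQ / PMULUDQ.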
33723       if (Subtarget.hasSSE41() &&
33724           match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
33725                                  m_SpecificInt(32)))) {
33726         Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
33727         Ops.push_back(&Op);
33728       } else if (Subtarget.hasSSE2() &&
33729                  match(Op.get(),
33730                        m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
33731         Ops.push_back(&Op);
33732       }
33733     }
33734 
33735     return !Ops.empty();
33736   }
33737 
33738   // A uniform shift amount in a vector shift or funnel shift may be much
33739   // cheaper than a generic variable vector shift, so make that pattern visible
33740   // to SDAG by sinking the shuffle instruction next to the shift.
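        // Roughly, the pattern being sunk looks like (illustrative only):
        //   %amt = shufflevector <4 x i32> %a, <4 x i32> poison,
        //                        <4 x i32> zeroinitializer             ; splat
        //   %res = shl <4 x i32> %x, %amt
        // Keeping the splat adjacent to the shift lets ISel use a
        // shift-by-scalar instruction (e.g. PSLLD with the amount in an XMM).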
33741   int ShiftAmountOpNum = -1;
33742   if (I->isShift())
33743     ShiftAmountOpNum = 1;
33744   else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
33745     if (II->getIntrinsicID() == Intrinsic::fshl ||
33746         II->getIntrinsicID() == Intrinsic::fshr)
33747       ShiftAmountOpNum = 2;
33748   }
33749 
33750   if (ShiftAmountOpNum == -1)
33751     return false;
33752 
33753   auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
33754   if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
33755       isVectorShiftByScalarCheap(I->getType())) {
33756     Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
33757     return true;
33758   }
33759 
33760   return false;
33761 }
33762 
33763 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
33764   if (!Subtarget.is64Bit())
33765     return false;
33766   return TargetLowering::shouldConvertPhiType(From, To);
33767 }
33768 
33769 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
33770   if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
33771     return false;
33772 
33773   EVT SrcVT = ExtVal.getOperand(0).getValueType();
33774 
33775   // There is no extending load for vXi1.
33776   if (SrcVT.getScalarType() == MVT::i1)
33777     return false;
33778 
33779   return true;
33780 }
33781 
33782 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
33783                                                    EVT VT) const {
33784   if (!Subtarget.hasAnyFMA())
33785     return false;
33786 
33787   VT = VT.getScalarType();
33788 
33789   if (!VT.isSimple())
33790     return false;
33791 
33792   switch (VT.getSimpleVT().SimpleTy) {
33793   case MVT::f16:
33794     return Subtarget.hasFP16();
33795   case MVT::f32:
33796   case MVT::f64:
33797     return true;
33798   default:
33799     break;
33800   }
33801 
33802   return false;
33803 }
33804 
33805 bool X86TargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
33806   // i16 instructions are longer (0x66 prefix) and potentially slower.
33807   return !(SrcVT == MVT::i32 && DestVT == MVT::i16);
33808 }
33809 
33810 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
33811                                                              EVT VT) const {
33812   // TODO: This is too general. There are cases where pre-AVX512 codegen would
33813   //       benefit. The transform may also be profitable for scalar code.
33814   if (!Subtarget.hasAVX512())
33815     return false;
33816   if (!Subtarget.hasVLX() && !VT.is512BitVector())
33817     return false;
33818   if (!VT.isVector() || VT.getScalarType() == MVT::i1)
33819     return false;
33820 
33821   return true;
33822 }
33823 
33824 /// Targets can use this to indicate that they only support *some*
33825 /// VECTOR_SHUFFLE operations, those with specific masks.
33826 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
33827 /// are assumed to be legal.
33828 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
33829   if (!VT.isSimple())
33830     return false;
33831 
33832   // Not for i1 vectors
33833   if (VT.getSimpleVT().getScalarType() == MVT::i1)
33834     return false;
33835 
33836   // Very little shuffling can be done for 64-bit vectors right now.
33837   if (VT.getSimpleVT().getSizeInBits() == 64)
33838     return false;
33839 
33840   // We only care that the types being shuffled are legal. The lowering can
33841   // handle any possible shuffle mask that results.
33842   return isTypeLegal(VT.getSimpleVT());
33843 }
33844 
33845 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
33846                                                EVT VT) const {
33847   // Don't convert an 'and' into a shuffle that we don't directly support.
33848   // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
33849   if (!Subtarget.hasAVX2())
33850     if (VT == MVT::v32i8 || VT == MVT::v16i16)
33851       return false;
33852 
33853   // Just delegate to the generic legality, clear masks aren't special.
33854   return isShuffleMaskLegal(Mask, VT);
33855 }
33856 
33857 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
33858   // If the subtarget is using thunks, we must not generate jump tables.
33859   if (Subtarget.useIndirectThunkBranches())
33860     return false;
33861 
33862   // Otherwise, fall back on the generic logic.
33863   return TargetLowering::areJTsAllowed(Fn);
33864 }
33865 
33866 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
33867                                                        EVT ConditionVT) const {
33868   // Avoid 8- and 16-bit types because they increase the chance of unnecessary
33869   // zero-extensions.
33870   if (ConditionVT.getSizeInBits() < 32)
33871     return MVT::i32;
33872   return TargetLoweringBase::getPreferredSwitchConditionType(Context,
33873                                                              ConditionVT);
33874 }
33875 
33876 //===----------------------------------------------------------------------===//
33877 //                           X86 Scheduler Hooks
33878 //===----------------------------------------------------------------------===//
33879 
33880 // Returns true if EFLAGS is consumed after this iterator in the rest of the
33881 // basic block or any successors of the basic block.
33882 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
33883                               MachineBasicBlock *BB) {
33884   // Scan forward through BB for a use/def of EFLAGS.
33885   for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
33886     if (mi.readsRegister(X86::EFLAGS))
33887       return true;
33888     // If we found a def, we can stop searching.
33889     if (mi.definesRegister(X86::EFLAGS))
33890       return false;
33891   }
33892 
33893   // If we hit the end of the block, check whether EFLAGS is live into a
33894   // successor.
33895   for (MachineBasicBlock *Succ : BB->successors())
33896     if (Succ->isLiveIn(X86::EFLAGS))
33897       return true;
33898 
33899   return false;
33900 }
33901 
33902 /// Utility function to emit xbegin specifying the start of an RTM region.
33903 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
33904                                      const TargetInstrInfo *TII) {
33905   const MIMetadata MIMD(MI);
33906 
33907   const BasicBlock *BB = MBB->getBasicBlock();
33908   MachineFunction::iterator I = ++MBB->getIterator();
33909 
33910   // For the v = xbegin(), we generate
33911   //
33912   // thisMBB:
33913   //  xbegin fallMBB
33914   //
33915   // mainMBB:
33916   //  s0 = -1
33917   //
33918   // fallBB:
33919   //  eax = # XABORT_DEF
33920   //  s1 = eax
33921   //
33922   // sinkMBB:
33923   //  v = phi(s0/mainBB, s1/fallBB)
33924 
33925   MachineBasicBlock *thisMBB = MBB;
33926   MachineFunction *MF = MBB->getParent();
33927   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
33928   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
33929   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
33930   MF->insert(I, mainMBB);
33931   MF->insert(I, fallMBB);
33932   MF->insert(I, sinkMBB);
33933 
33934   if (isEFLAGSLiveAfter(MI, MBB)) {
33935     mainMBB->addLiveIn(X86::EFLAGS);
33936     fallMBB->addLiveIn(X86::EFLAGS);
33937     sinkMBB->addLiveIn(X86::EFLAGS);
33938   }
33939 
33940   // Transfer the remainder of BB and its successor edges to sinkMBB.
33941   sinkMBB->splice(sinkMBB->begin(), MBB,
33942                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
33943   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
33944 
33945   MachineRegisterInfo &MRI = MF->getRegInfo();
33946   Register DstReg = MI.getOperand(0).getReg();
33947   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
33948   Register mainDstReg = MRI.createVirtualRegister(RC);
33949   Register fallDstReg = MRI.createVirtualRegister(RC);
33950 
33951   // thisMBB:
33952   //  xbegin fallMBB
33953   //  # fallthrough to mainMBB
33954   //  # abort jumps to fallMBB
33955   BuildMI(thisMBB, MIMD, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
33956   thisMBB->addSuccessor(mainMBB);
33957   thisMBB->addSuccessor(fallMBB);
33958 
33959   // mainMBB:
33960   //  mainDstReg := -1
33961   BuildMI(mainMBB, MIMD, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
33962   BuildMI(mainMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
33963   mainMBB->addSuccessor(sinkMBB);
33964 
33965   // fallMBB:
33966   //  ; pseudo instruction to model hardware's definition from XABORT
33967   //  EAX := XABORT_DEF
33968   //  fallDstReg := EAX
33969   BuildMI(fallMBB, MIMD, TII->get(X86::XABORT_DEF));
33970   BuildMI(fallMBB, MIMD, TII->get(TargetOpcode::COPY), fallDstReg)
33971       .addReg(X86::EAX);
33972   fallMBB->addSuccessor(sinkMBB);
33973 
33974   // sinkMBB:
33975   //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
33976   BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
33977       .addReg(mainDstReg).addMBB(mainMBB)
33978       .addReg(fallDstReg).addMBB(fallMBB);
33979 
33980   MI.eraseFromParent();
33981   return sinkMBB;
33982 }
33983 
33984 MachineBasicBlock *
33985 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
33986                                                MachineBasicBlock *MBB) const {
33987   // Emit va_arg instruction on X86-64.
33988 
33989   // Operands to this pseudo-instruction:
33990   // 0  ) Output        : destination address (reg)
33991   // 1-5) Input         : va_list address (addr, i64mem)
33992   // 6  ) ArgSize       : Size (in bytes) of vararg type
33993   // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
33994   // 8  ) Align         : Alignment of type
33995   // 9  ) EFLAGS (implicit-def)
33996 
33997   assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
33998   static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
33999 
34000   Register DestReg = MI.getOperand(0).getReg();
34001   MachineOperand &Base = MI.getOperand(1);
34002   MachineOperand &Scale = MI.getOperand(2);
34003   MachineOperand &Index = MI.getOperand(3);
34004   MachineOperand &Disp = MI.getOperand(4);
34005   MachineOperand &Segment = MI.getOperand(5);
34006   unsigned ArgSize = MI.getOperand(6).getImm();
34007   unsigned ArgMode = MI.getOperand(7).getImm();
34008   Align Alignment = Align(MI.getOperand(8).getImm());
34009 
34010   MachineFunction *MF = MBB->getParent();
34011 
34012   // Memory Reference
34013   assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
34014 
34015   MachineMemOperand *OldMMO = MI.memoperands().front();
34016 
34017   // Clone the MMO into two separate MMOs for loading and storing
34018   MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
34019       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
34020   MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
34021       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
34022 
34023   // Machine Information
34024   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34025   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
34026   const TargetRegisterClass *AddrRegClass =
34027       getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
34028   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
34029   const MIMetadata MIMD(MI);
34030 
34031   // struct va_list {
34032   //   i32   gp_offset
34033   //   i32   fp_offset
34034   //   i64   overflow_area (address)
34035   //   i64   reg_save_area (address)
34036   // }
34037   // sizeof(va_list) = 24
34038   // alignment(va_list) = 8
34039 
34040   unsigned TotalNumIntRegs = 6;
34041   unsigned TotalNumXMMRegs = 8;
34042   bool UseGPOffset = (ArgMode == 1);
34043   bool UseFPOffset = (ArgMode == 2);
34044   unsigned MaxOffset = TotalNumIntRegs * 8 +
34045                        (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
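        // e.g. for the SysV x86-64 ABI this is 6*8 = 48 when pulling from a GP
        // register (gp_offset) and 48 + 8*16 = 176 when pulling from an XMM
        // register (fp_offset counts past the 48-byte integer area of the
        // register save area).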
34046 
34047   // Align ArgSize to a multiple of 8.
34048   unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
34049   bool NeedsAlign = (Alignment > 8);
34050 
34051   MachineBasicBlock *thisMBB = MBB;
34052   MachineBasicBlock *overflowMBB;
34053   MachineBasicBlock *offsetMBB;
34054   MachineBasicBlock *endMBB;
34055 
34056   unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
34057   unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
34058   unsigned OffsetReg = 0;
34059 
34060   if (!UseGPOffset && !UseFPOffset) {
34061     // If we only pull from the overflow region, we don't create a branch.
34062     // We don't need to alter control flow.
34063     OffsetDestReg = 0; // unused
34064     OverflowDestReg = DestReg;
34065 
34066     offsetMBB = nullptr;
34067     overflowMBB = thisMBB;
34068     endMBB = thisMBB;
34069   } else {
34070     // First emit code to check if gp_offset (or fp_offset) is below the bound.
34071     // If so, pull the argument from reg_save_area. (branch to offsetMBB)
34072     // If not, pull from overflow_area. (branch to overflowMBB)
34073     //
34074     //       thisMBB
34075     //         |     .
34076     //         |        .
34077     //     offsetMBB   overflowMBB
34078     //         |        .
34079     //         |     .
34080     //        endMBB
34081 
34082     // Registers for the PHI in endMBB
34083     OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
34084     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
34085 
34086     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34087     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34088     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34089     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34090 
34091     MachineFunction::iterator MBBIter = ++MBB->getIterator();
34092 
34093     // Insert the new basic blocks
34094     MF->insert(MBBIter, offsetMBB);
34095     MF->insert(MBBIter, overflowMBB);
34096     MF->insert(MBBIter, endMBB);
34097 
34098     // Transfer the remainder of MBB and its successor edges to endMBB.
34099     endMBB->splice(endMBB->begin(), thisMBB,
34100                    std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
34101     endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
34102 
34103     // Make offsetMBB and overflowMBB successors of thisMBB
34104     thisMBB->addSuccessor(offsetMBB);
34105     thisMBB->addSuccessor(overflowMBB);
34106 
34107     // endMBB is a successor of both offsetMBB and overflowMBB
34108     offsetMBB->addSuccessor(endMBB);
34109     overflowMBB->addSuccessor(endMBB);
34110 
34111     // Load the offset value into a register
34112     OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34113     BuildMI(thisMBB, MIMD, TII->get(X86::MOV32rm), OffsetReg)
34114         .add(Base)
34115         .add(Scale)
34116         .add(Index)
34117         .addDisp(Disp, UseFPOffset ? 4 : 0)
34118         .add(Segment)
34119         .setMemRefs(LoadOnlyMMO);
34120 
34121     // Check if there is enough room left to pull this argument.
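    // For example, for a GP argument of 8 bytes this compares gp_offset
    // against 48 (six 8-byte register slots); for an XMM argument of 16 bytes
    // it compares fp_offset against 176 + 8 - 16 = 168.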
34122     BuildMI(thisMBB, MIMD, TII->get(X86::CMP32ri))
34123       .addReg(OffsetReg)
34124       .addImm(MaxOffset + 8 - ArgSizeA8);
34125 
34126     // Branch to "overflowMBB" if offset >= max
34127     // Fall through to "offsetMBB" otherwise
34128     BuildMI(thisMBB, MIMD, TII->get(X86::JCC_1))
34129       .addMBB(overflowMBB).addImm(X86::COND_AE);
34130   }
34131 
34132   // In offsetMBB, emit code to use the reg_save_area.
34133   if (offsetMBB) {
34134     assert(OffsetReg != 0);
34135 
34136     // Read the reg_save_area address.
34137     Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
34138     BuildMI(
34139         offsetMBB, MIMD,
34140         TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34141         RegSaveReg)
34142         .add(Base)
34143         .add(Scale)
34144         .add(Index)
34145         .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
34146         .add(Segment)
34147         .setMemRefs(LoadOnlyMMO);
34148 
34149     if (Subtarget.isTarget64BitLP64()) {
34150       // Zero-extend the offset
34151       Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
34152       BuildMI(offsetMBB, MIMD, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
34153           .addImm(0)
34154           .addReg(OffsetReg)
34155           .addImm(X86::sub_32bit);
34156 
34157       // Add the offset to the reg_save_area to get the final address.
34158       BuildMI(offsetMBB, MIMD, TII->get(X86::ADD64rr), OffsetDestReg)
34159           .addReg(OffsetReg64)
34160           .addReg(RegSaveReg);
34161     } else {
34162       // Add the offset to the reg_save_area to get the final address.
34163       BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32rr), OffsetDestReg)
34164           .addReg(OffsetReg)
34165           .addReg(RegSaveReg);
34166     }
34167 
34168     // Compute the offset for the next argument
34169     Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34170     BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32ri), NextOffsetReg)
34171       .addReg(OffsetReg)
34172       .addImm(UseFPOffset ? 16 : 8);
34173 
34174     // Store it back into the va_list.
34175     BuildMI(offsetMBB, MIMD, TII->get(X86::MOV32mr))
34176         .add(Base)
34177         .add(Scale)
34178         .add(Index)
34179         .addDisp(Disp, UseFPOffset ? 4 : 0)
34180         .add(Segment)
34181         .addReg(NextOffsetReg)
34182         .setMemRefs(StoreOnlyMMO);
34183 
34184     // Jump to endMBB
34185     BuildMI(offsetMBB, MIMD, TII->get(X86::JMP_1))
34186       .addMBB(endMBB);
34187   }
34188 
34189   //
34190   // Emit code to use overflow area
34191   //
34192 
34193   // Load the overflow_area address into a register.
34194   Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
34195   BuildMI(overflowMBB, MIMD,
34196           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34197           OverflowAddrReg)
34198       .add(Base)
34199       .add(Scale)
34200       .add(Index)
34201       .addDisp(Disp, 8)
34202       .add(Segment)
34203       .setMemRefs(LoadOnlyMMO);
34204 
34205   // If we need to align it, do so. Otherwise, just copy the address
34206   // to OverflowDestReg.
34207   if (NeedsAlign) {
34208     // Align the overflow address
34209     Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
34210 
34211     // aligned_addr = (addr + (align-1)) & ~(align-1)
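    // For example, with Alignment == 16 and an overflow address of 0x1008,
    // TmpReg becomes 0x1017 and the AND below masks it down to 0x1010.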
34212     BuildMI(
34213         overflowMBB, MIMD,
34214         TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34215         TmpReg)
34216         .addReg(OverflowAddrReg)
34217         .addImm(Alignment.value() - 1);
34218 
34219     BuildMI(
34220         overflowMBB, MIMD,
34221         TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
34222         OverflowDestReg)
34223         .addReg(TmpReg)
34224         .addImm(~(uint64_t)(Alignment.value() - 1));
34225   } else {
34226     BuildMI(overflowMBB, MIMD, TII->get(TargetOpcode::COPY), OverflowDestReg)
34227       .addReg(OverflowAddrReg);
34228   }
34229 
34230   // Compute the next overflow address after this argument.
34231   // (the overflow address should be kept 8-byte aligned)
34232   Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
34233   BuildMI(
34234       overflowMBB, MIMD,
34235       TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34236       NextAddrReg)
34237       .addReg(OverflowDestReg)
34238       .addImm(ArgSizeA8);
34239 
34240   // Store the new overflow address.
34241   BuildMI(overflowMBB, MIMD,
34242           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
34243       .add(Base)
34244       .add(Scale)
34245       .add(Index)
34246       .addDisp(Disp, 8)
34247       .add(Segment)
34248       .addReg(NextAddrReg)
34249       .setMemRefs(StoreOnlyMMO);
34250 
34251   // If we branched, emit the PHI to the front of endMBB.
34252   if (offsetMBB) {
34253     BuildMI(*endMBB, endMBB->begin(), MIMD,
34254             TII->get(X86::PHI), DestReg)
34255       .addReg(OffsetDestReg).addMBB(offsetMBB)
34256       .addReg(OverflowDestReg).addMBB(overflowMBB);
34257   }
34258 
34259   // Erase the pseudo instruction
34260   MI.eraseFromParent();
34261 
34262   return endMBB;
34263 }
34264 
34265 // The EFLAGS operand of SelectItr might be missing a kill marker
34266 // because there were multiple uses of EFLAGS, and ISel didn't know
34267 // which to mark. Figure out whether SelectItr should have had a
34268 // kill marker, and set it if it should. Returns the correct kill
34269 // marker value.
34270 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
34271                                      MachineBasicBlock* BB,
34272                                      const TargetRegisterInfo* TRI) {
34273   if (isEFLAGSLiveAfter(SelectItr, BB))
34274     return false;
34275 
34276   // We found a def, or hit the end of the basic block and EFLAGS wasn't live
34277   // out. SelectMI should have a kill flag on EFLAGS.
34278   SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
34279   return true;
34280 }
34281 
34282 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
34283 // together with other CMOV pseudo-opcodes into a single basic block with a
34284 // conditional jump around it.
34285 static bool isCMOVPseudo(MachineInstr &MI) {
34286   switch (MI.getOpcode()) {
34287   case X86::CMOV_FR16:
34288   case X86::CMOV_FR16X:
34289   case X86::CMOV_FR32:
34290   case X86::CMOV_FR32X:
34291   case X86::CMOV_FR64:
34292   case X86::CMOV_FR64X:
34293   case X86::CMOV_GR8:
34294   case X86::CMOV_GR16:
34295   case X86::CMOV_GR32:
34296   case X86::CMOV_RFP32:
34297   case X86::CMOV_RFP64:
34298   case X86::CMOV_RFP80:
34299   case X86::CMOV_VR64:
34300   case X86::CMOV_VR128:
34301   case X86::CMOV_VR128X:
34302   case X86::CMOV_VR256:
34303   case X86::CMOV_VR256X:
34304   case X86::CMOV_VR512:
34305   case X86::CMOV_VK1:
34306   case X86::CMOV_VK2:
34307   case X86::CMOV_VK4:
34308   case X86::CMOV_VK8:
34309   case X86::CMOV_VK16:
34310   case X86::CMOV_VK32:
34311   case X86::CMOV_VK64:
34312     return true;
34313 
34314   default:
34315     return false;
34316   }
34317 }
34318 
34319 // Helper function, which inserts PHI functions into SinkMBB:
34320 //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
34321 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
34322 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
34323 // the last PHI function inserted.
34324 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
34325     MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
34326     MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
34327     MachineBasicBlock *SinkMBB) {
34328   MachineFunction *MF = TrueMBB->getParent();
34329   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
34330   const MIMetadata MIMD(*MIItBegin);
34331 
34332   X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
34333   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34334 
34335   MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
34336 
34337   // As we are creating the PHIs, we have to be careful if there is more than
34338   // one.  Later CMOVs may reference the results of earlier CMOVs, but later
34339   // PHIs have to reference the individual true/false inputs from earlier PHIs.
34340   // That also means that PHI construction must work forward from earlier to
34341   // later, and that the code must maintain a mapping from each earlier PHI's
34342   // destination register to the registers that went into that PHI.
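  //
  // For example, when lowering
  //   t2 = CMOV cc t1, f1
  //   t3 = CMOV cc t2, f2
  // the table records t2 -> (t1, f1), so the PHI for t3 can take t1/f1 on the
  // corresponding incoming edges instead of t2, which is itself a PHI result.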
34343   DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
34344   MachineInstrBuilder MIB;
34345 
34346   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
34347     Register DestReg = MIIt->getOperand(0).getReg();
34348     Register Op1Reg = MIIt->getOperand(1).getReg();
34349     Register Op2Reg = MIIt->getOperand(2).getReg();
34350 
34351     // If the CMOV being lowered uses the opposite condition from the jump
34352     // we generated, then we have to swap the operands for the PHI that is
34353     // going to be generated.
34354     if (MIIt->getOperand(3).getImm() == OppCC)
34355       std::swap(Op1Reg, Op2Reg);
34356 
34357     if (RegRewriteTable.contains(Op1Reg))
34358       Op1Reg = RegRewriteTable[Op1Reg].first;
34359 
34360     if (RegRewriteTable.contains(Op2Reg))
34361       Op2Reg = RegRewriteTable[Op2Reg].second;
34362 
34363     MIB =
34364         BuildMI(*SinkMBB, SinkInsertionPoint, MIMD, TII->get(X86::PHI), DestReg)
34365             .addReg(Op1Reg)
34366             .addMBB(FalseMBB)
34367             .addReg(Op2Reg)
34368             .addMBB(TrueMBB);
34369 
34370     // Add this PHI to the rewrite table.
34371     RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
34372   }
34373 
34374   return MIB;
34375 }
34376 
34377 // Lower cascaded selects of the form (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
34378 MachineBasicBlock *
34379 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
34380                                              MachineInstr &SecondCascadedCMOV,
34381                                              MachineBasicBlock *ThisMBB) const {
34382   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34383   const MIMetadata MIMD(FirstCMOV);
34384 
34385   // We lower cascaded CMOVs such as
34386   //
34387   //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
34388   //
34389   // to two successive branches.
34390   //
34391   // Without this, we would add a PHI between the two jumps, which ends up
34392   // creating a few copies all around. For instance, for
34393   //
34394   //    (sitofp (zext (fcmp une)))
34395   //
34396   // we would generate:
34397   //
34398   //         ucomiss %xmm1, %xmm0
34399   //         movss  <1.0f>, %xmm0
34400   //         movaps  %xmm0, %xmm1
34401   //         jne     .LBB5_2
34402   //         xorps   %xmm1, %xmm1
34403   // .LBB5_2:
34404   //         jp      .LBB5_4
34405   //         movaps  %xmm1, %xmm0
34406   // .LBB5_4:
34407   //         retq
34408   //
34409   // because this custom-inserter would have generated:
34410   //
34411   //   A
34412   //   | \
34413   //   |  B
34414   //   | /
34415   //   C
34416   //   | \
34417   //   |  D
34418   //   | /
34419   //   E
34420   //
34421   // A: X = ...; Y = ...
34422   // B: empty
34423   // C: Z = PHI [X, A], [Y, B]
34424   // D: empty
34425   // E: PHI [X, C], [Z, D]
34426   //
34427   // If we lower both CMOVs in a single step, we can instead generate:
34428   //
34429   //   A
34430   //   | \
34431   //   |  C
34432   //   | /|
34433   //   |/ |
34434   //   |  |
34435   //   |  D
34436   //   | /
34437   //   E
34438   //
34439   // A: X = ...; Y = ...
34440   // D: empty
34441   // E: PHI [X, A], [X, C], [Y, D]
34442   //
34443   // Which, in our sitofp/fcmp example, gives us something like:
34444   //
34445   //         ucomiss %xmm1, %xmm0
34446   //         movss  <1.0f>, %xmm0
34447   //         jne     .LBB5_4
34448   //         jp      .LBB5_4
34449   //         xorps   %xmm0, %xmm0
34450   // .LBB5_4:
34451   //         retq
34452   //
34453 
34454   // We lower a cascaded CMOV into two successive branches to the same block.
34455   // EFLAGS is used by both, so mark it as live into the second branch's block.
34456   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34457   MachineFunction *F = ThisMBB->getParent();
34458   MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34459   MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34460   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34461 
34462   MachineFunction::iterator It = ++ThisMBB->getIterator();
34463   F->insert(It, FirstInsertedMBB);
34464   F->insert(It, SecondInsertedMBB);
34465   F->insert(It, SinkMBB);
34466 
34467   // For a cascaded CMOV, we lower it to two successive branches to
34468   // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
34469   // the FirstInsertedMBB.
34470   FirstInsertedMBB->addLiveIn(X86::EFLAGS);
34471 
34472   // If the EFLAGS register isn't dead in the terminator, then claim that it's
34473   // live into the sink and copy blocks.
34474   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34475   if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
34476       !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
34477     SecondInsertedMBB->addLiveIn(X86::EFLAGS);
34478     SinkMBB->addLiveIn(X86::EFLAGS);
34479   }
34480 
34481   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34482   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
34483                   std::next(MachineBasicBlock::iterator(FirstCMOV)),
34484                   ThisMBB->end());
34485   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34486 
34487   // Fallthrough block for ThisMBB.
34488   ThisMBB->addSuccessor(FirstInsertedMBB);
34489   // The true block target of the first branch is always SinkMBB.
34490   ThisMBB->addSuccessor(SinkMBB);
34491   // Fallthrough block for FirstInsertedMBB.
34492   FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
34493   // The true block for the branch of FirstInsertedMBB.
34494   FirstInsertedMBB->addSuccessor(SinkMBB);
34495   // This is fallthrough.
34496   SecondInsertedMBB->addSuccessor(SinkMBB);
34497 
34498   // Create the conditional branch instructions.
34499   X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
34500   BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
34501 
34502   X86::CondCode SecondCC =
34503       X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
34504   BuildMI(FirstInsertedMBB, MIMD, TII->get(X86::JCC_1))
34505       .addMBB(SinkMBB)
34506       .addImm(SecondCC);
34507 
34508   //  SinkMBB:
34509   //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
34510   Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
34511   Register Op1Reg = FirstCMOV.getOperand(1).getReg();
34512   Register Op2Reg = FirstCMOV.getOperand(2).getReg();
34513   MachineInstrBuilder MIB =
34514       BuildMI(*SinkMBB, SinkMBB->begin(), MIMD, TII->get(X86::PHI), DestReg)
34515           .addReg(Op1Reg)
34516           .addMBB(SecondInsertedMBB)
34517           .addReg(Op2Reg)
34518           .addMBB(ThisMBB);
34519 
34520   // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
34521   // (the True operand of the SELECT_CC/CMOV nodes).
34522   MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
34523 
34524   // Now remove the CMOVs.
34525   FirstCMOV.eraseFromParent();
34526   SecondCascadedCMOV.eraseFromParent();
34527 
34528   return SinkMBB;
34529 }
34530 
34531 MachineBasicBlock *
34532 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
34533                                      MachineBasicBlock *ThisMBB) const {
34534   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34535   const MIMetadata MIMD(MI);
34536 
34537   // To "insert" a SELECT_CC instruction, we actually have to insert the
34538   // diamond control-flow pattern.  The incoming instruction knows the
34539   // destination vreg to set, the condition code register to branch on, the
34540   // true/false values to select between and a branch opcode to use.
34541 
34542   //  ThisMBB:
34543   //  ...
34544   //   TrueVal = ...
34545   //   cmpTY ccX, r1, r2
34546   //   bCC copy1MBB
34547   //   fallthrough --> FalseMBB
34548 
34549   // This code lowers all pseudo-CMOV instructions. Generally it lowers these
34550   // as described above, by inserting a BB, and then making a PHI at the join
34551   // point to select the true and false operands of the CMOV in the PHI.
34552   //
34553   // The code also handles two different cases of multiple CMOV opcodes
34554   // in a row.
34555   //
34556   // Case 1:
34557   // In this case, there are multiple CMOVs in a row, all of which are based
34558   // on the same condition setting (or the exact opposite condition setting).
34559   // In this case we can lower all the CMOVs using a single inserted BB, and
34560   // then make a number of PHIs at the join point to model the CMOVs. The only
34561   // trickiness here is that, in a case like:
34562   //
34563   // t2 = CMOV cond1 t1, f1
34564   // t3 = CMOV cond1 t2, f2
34565   //
34566   // when rewriting this into PHIs, we have to perform some renaming on the
34567   // temps since you cannot have a PHI operand refer to a PHI result earlier
34568   // in the same block.  The "simple" but wrong lowering would be:
34569   //
34570   // t2 = PHI t1(BB1), f1(BB2)
34571   // t3 = PHI t2(BB1), f2(BB2)
34572   //
34573   // but clearly t2 is not defined in BB1, so that is incorrect. The proper
34574   // renaming is to note that on the path through BB1, t2 is really just a
34575   // copy of t1, and do that renaming, properly generating:
34576   //
34577   // t2 = PHI t1(BB1), f1(BB2)
34578   // t3 = PHI t1(BB1), f2(BB2)
34579   //
34580   // Case 2:
34581   // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
34582   // function - EmitLoweredCascadedSelect.
34583 
34584   X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
34585   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34586   MachineInstr *LastCMOV = &MI;
34587   MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
34588 
34589   // First, check for case 1, where there are multiple CMOVs with the same
34590   // condition.  Of the two cases of multiple CMOV lowerings, case 1 reduces
34591   // the number of jumps the most.
34592 
34593   if (isCMOVPseudo(MI)) {
34594     // See if we have a string of CMOVs with the same condition. Skip over
34595     // intervening debug insts.
34596     while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
34597            (NextMIIt->getOperand(3).getImm() == CC ||
34598             NextMIIt->getOperand(3).getImm() == OppCC)) {
34599       LastCMOV = &*NextMIIt;
34600       NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
34601     }
34602   }
34603 
34604   // This checks for case 2, but only if we didn't already find case 1, as
34605   // indicated by LastCMOV == &MI.
34606   if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
34607       NextMIIt->getOpcode() == MI.getOpcode() &&
34608       NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
34609       NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
34610       NextMIIt->getOperand(1).isKill()) {
34611     return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
34612   }
34613 
34614   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34615   MachineFunction *F = ThisMBB->getParent();
34616   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
34617   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34618 
34619   MachineFunction::iterator It = ++ThisMBB->getIterator();
34620   F->insert(It, FalseMBB);
34621   F->insert(It, SinkMBB);
34622 
34623   // Set the call frame size on entry to the new basic blocks.
34624   unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
34625   FalseMBB->setCallFrameSize(CallFrameSize);
34626   SinkMBB->setCallFrameSize(CallFrameSize);
34627 
34628   // If the EFLAGS register isn't dead in the terminator, then claim that it's
34629   // live into the sink and copy blocks.
34630   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34631   if (!LastCMOV->killsRegister(X86::EFLAGS) &&
34632       !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
34633     FalseMBB->addLiveIn(X86::EFLAGS);
34634     SinkMBB->addLiveIn(X86::EFLAGS);
34635   }
34636 
34637   // Transfer any debug instructions inside the CMOV sequence to the sunk block.
34638   auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
34639                                    MachineBasicBlock::iterator(LastCMOV));
34640   for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
34641     if (MI.isDebugInstr())
34642       SinkMBB->push_back(MI.removeFromParent());
34643 
34644   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34645   SinkMBB->splice(SinkMBB->end(), ThisMBB,
34646                   std::next(MachineBasicBlock::iterator(LastCMOV)),
34647                   ThisMBB->end());
34648   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34649 
34650   // Fallthrough block for ThisMBB.
34651   ThisMBB->addSuccessor(FalseMBB);
34652   // The true block target of the first (or only) branch is always SinkMBB.
34653   ThisMBB->addSuccessor(SinkMBB);
34654   // Fallthrough block for FalseMBB.
34655   FalseMBB->addSuccessor(SinkMBB);
34656 
34657   // Create the conditional branch instruction.
34658   BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
34659 
34660   //  SinkMBB:
34661   //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
34662   //  ...
34663   MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
34664   MachineBasicBlock::iterator MIItEnd =
34665       std::next(MachineBasicBlock::iterator(LastCMOV));
34666   createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
34667 
34668   // Now remove the CMOV(s).
34669   ThisMBB->erase(MIItBegin, MIItEnd);
34670 
34671   return SinkMBB;
34672 }
34673 
34674 static unsigned getSUBriOpcode(bool IsLP64) {
34675   if (IsLP64)
34676     return X86::SUB64ri32;
34677   else
34678     return X86::SUB32ri;
34679 }
34680 
34681 MachineBasicBlock *
34682 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
34683                                            MachineBasicBlock *MBB) const {
34684   MachineFunction *MF = MBB->getParent();
34685   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34686   const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
34687   const MIMetadata MIMD(MI);
34688   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34689 
34690   const unsigned ProbeSize = getStackProbeSize(*MF);
34691 
34692   MachineRegisterInfo &MRI = MF->getRegInfo();
34693   MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34694   MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34695   MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34696 
34697   MachineFunction::iterator MBBIter = ++MBB->getIterator();
34698   MF->insert(MBBIter, testMBB);
34699   MF->insert(MBBIter, blockMBB);
34700   MF->insert(MBBIter, tailMBB);
34701 
34702   Register sizeVReg = MI.getOperand(1).getReg();
34703 
34704   Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
34705 
34706   Register TmpStackPtr = MRI.createVirtualRegister(
34707       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34708   Register FinalStackPtr = MRI.createVirtualRegister(
34709       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34710 
34711   BuildMI(*MBB, {MI}, MIMD, TII->get(TargetOpcode::COPY), TmpStackPtr)
34712       .addReg(physSPReg);
34713   {
34714     const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
34715     BuildMI(*MBB, {MI}, MIMD, TII->get(Opc), FinalStackPtr)
34716         .addReg(TmpStackPtr)
34717         .addReg(sizeVReg);
34718   }
34719 
34720   // test rsp size
34721 
34722   BuildMI(testMBB, MIMD,
34723           TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
34724       .addReg(FinalStackPtr)
34725       .addReg(physSPReg);
34726 
34727   BuildMI(testMBB, MIMD, TII->get(X86::JCC_1))
34728       .addMBB(tailMBB)
34729       .addImm(X86::COND_GE);
34730   testMBB->addSuccessor(blockMBB);
34731   testMBB->addSuccessor(tailMBB);
34732 
34733   // Touch the block, then extend it. This is the opposite order from a static
34734   // probe, where we allocate then touch; it avoids having to probe the tail of
34735   // the static alloca. Possible scenarios are:
34736   //
34737   //       + ---- <- ------------ <- ------------- <- ------------ +
34738   //       |                                                       |
34739   // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
34740   //                                                               |                                                               |
34741   //                                                               + <- ----------- <- ------------ <- ----------- <- ------------ +
34742   //
34743   // The property we want to enforce is that there is never more than [page alloc] between two probes.
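  //
  // A rough sketch of the loop emitted below (signed compare, as in the JCC):
  //
  //   testMBB:   if (FinalStackPtr >= SP) goto tailMBB
  //   blockMBB:  xor [SP], 0          // touch the current page
  //              SP -= ProbeSize
  //              goto testMBB
  //   tailMBB:   result = FinalStackPtr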
34744 
34745   const unsigned XORMIOpc =
34746       TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
34747   addRegOffset(BuildMI(blockMBB, MIMD, TII->get(XORMIOpc)), physSPReg, false, 0)
34748       .addImm(0);
34749 
34750   BuildMI(blockMBB, MIMD, TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr)),
34751           physSPReg)
34752       .addReg(physSPReg)
34753       .addImm(ProbeSize);
34754 
34755   BuildMI(blockMBB, MIMD, TII->get(X86::JMP_1)).addMBB(testMBB);
34756   blockMBB->addSuccessor(testMBB);
34757 
34758   // Replace original instruction by the expected stack ptr
34759   BuildMI(tailMBB, MIMD, TII->get(TargetOpcode::COPY),
34760           MI.getOperand(0).getReg())
34761       .addReg(FinalStackPtr);
34762 
34763   tailMBB->splice(tailMBB->end(), MBB,
34764                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
34765   tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
34766   MBB->addSuccessor(testMBB);
34767 
34768   // Delete the original pseudo instruction.
34769   MI.eraseFromParent();
34770 
34771   // And we're done.
34772   return tailMBB;
34773 }
34774 
34775 MachineBasicBlock *
34776 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
34777                                         MachineBasicBlock *BB) const {
34778   MachineFunction *MF = BB->getParent();
34779   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34780   const MIMetadata MIMD(MI);
34781   const BasicBlock *LLVM_BB = BB->getBasicBlock();
34782 
34783   assert(MF->shouldSplitStack());
34784 
34785   const bool Is64Bit = Subtarget.is64Bit();
34786   const bool IsLP64 = Subtarget.isTarget64BitLP64();
34787 
34788   const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
34789   const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
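  // Hedged note: TlsOffset selects the TCB slot that split-stack runtimes use
  // for the stack limit (e.g. %fs:0x70 on LP64, %gs:0x30 on 32-bit targets);
  // the exact slot is platform-dependent.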
34790 
34791   // BB:
34792   //  ... [Till the alloca]
34793   // If the stacklet is not large enough, jump to mallocMBB
34794   //
34795   // bumpMBB:
34796   //  Allocate by subtracting from RSP
34797   //  Jump to continueMBB
34798   //
34799   // mallocMBB:
34800   //  Allocate by call to runtime
34801   //
34802   // continueMBB:
34803   //  ...
34804   //  [rest of original BB]
34805   //
34806 
34807   MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34808   MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34809   MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34810 
34811   MachineRegisterInfo &MRI = MF->getRegInfo();
34812   const TargetRegisterClass *AddrRegClass =
34813       getRegClassFor(getPointerTy(MF->getDataLayout()));
34814 
34815   Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34816            bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34817            tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
34818            SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
34819            sizeVReg = MI.getOperand(1).getReg(),
34820            physSPReg =
34821                IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
34822 
34823   MachineFunction::iterator MBBIter = ++BB->getIterator();
34824 
34825   MF->insert(MBBIter, bumpMBB);
34826   MF->insert(MBBIter, mallocMBB);
34827   MF->insert(MBBIter, continueMBB);
34828 
34829   continueMBB->splice(continueMBB->begin(), BB,
34830                       std::next(MachineBasicBlock::iterator(MI)), BB->end());
34831   continueMBB->transferSuccessorsAndUpdatePHIs(BB);
34832 
34833   // Add code to the main basic block to check if the stack limit has been hit,
34834   // and if so, jump to mallocMBB otherwise to bumpMBB.
34835   BuildMI(BB, MIMD, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
34836   BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
34837     .addReg(tmpSPVReg).addReg(sizeVReg);
34838   BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
34839     .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
34840     .addReg(SPLimitVReg);
34841   BuildMI(BB, MIMD, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
34842 
34843   // bumpMBB simply decreases the stack pointer, since we know the current
34844   // stacklet has enough space.
34845   BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), physSPReg)
34846     .addReg(SPLimitVReg);
34847   BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
34848     .addReg(SPLimitVReg);
34849   BuildMI(bumpMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34850 
34851   // Calls into a routine in libgcc to allocate more space from the heap.
34852   const uint32_t *RegMask =
34853       Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
34854   if (IsLP64) {
34855     BuildMI(mallocMBB, MIMD, TII->get(X86::MOV64rr), X86::RDI)
34856       .addReg(sizeVReg);
34857     BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34858       .addExternalSymbol("__morestack_allocate_stack_space")
34859       .addRegMask(RegMask)
34860       .addReg(X86::RDI, RegState::Implicit)
34861       .addReg(X86::RAX, RegState::ImplicitDefine);
34862   } else if (Is64Bit) {
34863     BuildMI(mallocMBB, MIMD, TII->get(X86::MOV32rr), X86::EDI)
34864       .addReg(sizeVReg);
34865     BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34866       .addExternalSymbol("__morestack_allocate_stack_space")
34867       .addRegMask(RegMask)
34868       .addReg(X86::EDI, RegState::Implicit)
34869       .addReg(X86::EAX, RegState::ImplicitDefine);
34870   } else {
34871     BuildMI(mallocMBB, MIMD, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
34872       .addImm(12);
34873     BuildMI(mallocMBB, MIMD, TII->get(X86::PUSH32r)).addReg(sizeVReg);
34874     BuildMI(mallocMBB, MIMD, TII->get(X86::CALLpcrel32))
34875       .addExternalSymbol("__morestack_allocate_stack_space")
34876       .addRegMask(RegMask)
34877       .addReg(X86::EAX, RegState::ImplicitDefine);
34878   }
34879 
34880   if (!Is64Bit)
34881     BuildMI(mallocMBB, MIMD, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
34882       .addImm(16);
34883 
34884   BuildMI(mallocMBB, MIMD, TII->get(TargetOpcode::COPY), mallocPtrVReg)
34885     .addReg(IsLP64 ? X86::RAX : X86::EAX);
34886   BuildMI(mallocMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34887 
34888   // Set up the CFG correctly.
34889   BB->addSuccessor(bumpMBB);
34890   BB->addSuccessor(mallocMBB);
34891   mallocMBB->addSuccessor(continueMBB);
34892   bumpMBB->addSuccessor(continueMBB);
34893 
34894   // Take care of the PHI nodes.
34895   BuildMI(*continueMBB, continueMBB->begin(), MIMD, TII->get(X86::PHI),
34896           MI.getOperand(0).getReg())
34897       .addReg(mallocPtrVReg)
34898       .addMBB(mallocMBB)
34899       .addReg(bumpSPPtrVReg)
34900       .addMBB(bumpMBB);
34901 
34902   // Delete the original pseudo instruction.
34903   MI.eraseFromParent();
34904 
34905   // And we're done.
34906   return continueMBB;
34907 }
34908 
34909 MachineBasicBlock *
34910 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
34911                                        MachineBasicBlock *BB) const {
34912   MachineFunction *MF = BB->getParent();
34913   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34914   MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
34915   const MIMetadata MIMD(MI);
34916 
34917   assert(!isAsynchronousEHPersonality(
34918              classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
34919          "SEH does not use catchret!");
34920 
34921   // Only 32-bit EH needs to worry about manually restoring stack pointers.
34922   if (!Subtarget.is32Bit())
34923     return BB;
34924 
34925   // C++ EH creates a new target block to hold the restore code, and wires up
34926   // the new block to the return destination with a normal JMP_4.
34927   MachineBasicBlock *RestoreMBB =
34928       MF->CreateMachineBasicBlock(BB->getBasicBlock());
34929   assert(BB->succ_size() == 1);
34930   MF->insert(std::next(BB->getIterator()), RestoreMBB);
34931   RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
34932   BB->addSuccessor(RestoreMBB);
34933   MI.getOperand(0).setMBB(RestoreMBB);
34934 
34935   // Marking this as an EH pad but not a funclet entry block causes PEI to
34936   // restore stack pointers in the block.
34937   RestoreMBB->setIsEHPad(true);
34938 
34939   auto RestoreMBBI = RestoreMBB->begin();
34940   BuildMI(*RestoreMBB, RestoreMBBI, MIMD, TII.get(X86::JMP_4)).addMBB(TargetMBB);
34941   return BB;
34942 }
34943 
34944 MachineBasicBlock *
34945 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
34946                                       MachineBasicBlock *BB) const {
34947   // So, here we replace TLSADDR with the sequence:
34948   // adjust_stackdown -> TLSADDR -> adjust_stackup.
34949   // We need this because TLSADDR is lowered into a call
34950   // inside MC, so without the two markers shrink-wrapping
34951   // may push the prologue/epilogue past them.
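  // Schematically, the block then contains:
  //   CALLSEQ_START 0, 0, 0
  //   TLSADDR ...
  //   CALLSEQ_END 0, 0
  // with the original TLSADDR left in place between the two markers.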
34952   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34953   const MIMetadata MIMD(MI);
34954   MachineFunction &MF = *BB->getParent();
34955 
34956   // Emit CALLSEQ_START right before the instruction.
34957   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
34958   MachineInstrBuilder CallseqStart =
34959       BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
34960   BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
34961 
34962   // Emit CALLSEQ_END right after the instruction.
34963   // We don't call erase from parent because we want to keep the
34964   // original instruction around.
34965   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
34966   MachineInstrBuilder CallseqEnd =
34967       BuildMI(MF, MIMD, TII.get(AdjStackUp)).addImm(0).addImm(0);
34968   BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
34969 
34970   return BB;
34971 }
34972 
34973 MachineBasicBlock *
34974 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
34975                                       MachineBasicBlock *BB) const {
34976   // This is pretty easy.  We're taking the value that we received from
34977   // our load from the relocation, sticking it in either RDI (x86-64)
34978   // or EAX and doing an indirect call.  The return value will then
34979   // be in the normal return register.
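  // For the 64-bit case this amounts to roughly (AT&T syntax):
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)
  // with the result in RAX; the 32-bit variants go through EAX instead.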
34980   MachineFunction *F = BB->getParent();
34981   const X86InstrInfo *TII = Subtarget.getInstrInfo();
34982   const MIMetadata MIMD(MI);
34983 
34984   assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
34985   assert(MI.getOperand(3).isGlobal() && "This should be a global");
34986 
34987   // Get a register mask for the lowered call.
34988   // FIXME: The 32-bit calls have non-standard calling conventions. Use a
34989   // proper register mask.
34990   const uint32_t *RegMask =
34991       Subtarget.is64Bit() ?
34992       Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
34993       Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
34994   if (Subtarget.is64Bit()) {
34995     MachineInstrBuilder MIB =
34996         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV64rm), X86::RDI)
34997             .addReg(X86::RIP)
34998             .addImm(0)
34999             .addReg(0)
35000             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35001                               MI.getOperand(3).getTargetFlags())
35002             .addReg(0);
35003     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL64m));
35004     addDirectMem(MIB, X86::RDI);
35005     MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
35006   } else if (!isPositionIndependent()) {
35007     MachineInstrBuilder MIB =
35008         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35009             .addReg(0)
35010             .addImm(0)
35011             .addReg(0)
35012             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35013                               MI.getOperand(3).getTargetFlags())
35014             .addReg(0);
35015     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35016     addDirectMem(MIB, X86::EAX);
35017     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35018   } else {
35019     MachineInstrBuilder MIB =
35020         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35021             .addReg(TII->getGlobalBaseReg(F))
35022             .addImm(0)
35023             .addReg(0)
35024             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35025                               MI.getOperand(3).getTargetFlags())
35026             .addReg(0);
35027     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35028     addDirectMem(MIB, X86::EAX);
35029     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35030   }
35031 
35032   MI.eraseFromParent(); // The pseudo instruction is gone now.
35033   return BB;
35034 }
35035 
35036 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
35037   switch (RPOpc) {
35038   case X86::INDIRECT_THUNK_CALL32:
35039     return X86::CALLpcrel32;
35040   case X86::INDIRECT_THUNK_CALL64:
35041     return X86::CALL64pcrel32;
35042   case X86::INDIRECT_THUNK_TCRETURN32:
35043     return X86::TCRETURNdi;
35044   case X86::INDIRECT_THUNK_TCRETURN64:
35045     return X86::TCRETURNdi64;
35046   }
35047   llvm_unreachable("not indirect thunk opcode");
35048 }
35049 
35050 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
35051                                           unsigned Reg) {
35052   if (Subtarget.useRetpolineExternalThunk()) {
35053     // When using an external thunk for retpolines, we pick names that match the
35054     // names GCC happens to use as well. This helps simplify the implementation
35055     // of the thunks for kernels where they have no easy ability to create
35056     // aliases and are doing non-trivial configuration of the thunk's body. For
35057     // example, the Linux kernel will do boot-time hot patching of the thunk
35058     // bodies and cannot easily export aliases of these to loaded modules.
35059     //
35060     // Note that at any point in the future, we may need to change the semantics
35061     // of how we implement retpolines and at that time will likely change the
35062     // name of the called thunk. Essentially, there is no hard guarantee that
35063     // LLVM will generate calls to specific thunks; we merely make a best-effort
35064     // attempt to help out kernels and other systems where duplicating the
35065     // thunks is costly.
35066     switch (Reg) {
35067     case X86::EAX:
35068       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35069       return "__x86_indirect_thunk_eax";
35070     case X86::ECX:
35071       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35072       return "__x86_indirect_thunk_ecx";
35073     case X86::EDX:
35074       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35075       return "__x86_indirect_thunk_edx";
35076     case X86::EDI:
35077       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35078       return "__x86_indirect_thunk_edi";
35079     case X86::R11:
35080       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35081       return "__x86_indirect_thunk_r11";
35082     }
35083     llvm_unreachable("unexpected reg for external indirect thunk");
35084   }
35085 
35086   if (Subtarget.useRetpolineIndirectCalls() ||
35087       Subtarget.useRetpolineIndirectBranches()) {
35088     // When targeting an internal COMDAT thunk use an LLVM-specific name.
35089     switch (Reg) {
35090     case X86::EAX:
35091       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35092       return "__llvm_retpoline_eax";
35093     case X86::ECX:
35094       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35095       return "__llvm_retpoline_ecx";
35096     case X86::EDX:
35097       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35098       return "__llvm_retpoline_edx";
35099     case X86::EDI:
35100       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35101       return "__llvm_retpoline_edi";
35102     case X86::R11:
35103       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35104       return "__llvm_retpoline_r11";
35105     }
35106     llvm_unreachable("unexpected reg for retpoline");
35107   }
35108 
35109   if (Subtarget.useLVIControlFlowIntegrity()) {
35110     assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35111     return "__llvm_lvi_thunk_r11";
35112   }
35113   llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
35114 }
35115 
35116 MachineBasicBlock *
35117 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
35118                                             MachineBasicBlock *BB) const {
35119   // Copy the virtual register into the R11 physical register and
35120   // call the retpoline thunk.
35121   const MIMetadata MIMD(MI);
35122   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35123   Register CalleeVReg = MI.getOperand(0).getReg();
35124   unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
35125 
35126   // Find an available scratch register to hold the callee. On 64-bit, we can
35127   // just use R11, but we scan for uses anyway to ensure we don't generate
35128   // incorrect code. On 32-bit, we pick one of EAX, ECX, or EDX that isn't
35129   // already a register use operand of the call to hold the callee. If none
35130   // are available, use EDI instead. EDI is chosen last because EBX is the PIC
35131   // base register and ESI is the base pointer for realigned stack frames with VLAs.
35132   SmallVector<unsigned, 3> AvailableRegs;
35133   if (Subtarget.is64Bit())
35134     AvailableRegs.push_back(X86::R11);
35135   else
35136     AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
35137 
35138   // Zero out any registers that are already used.
35139   for (const auto &MO : MI.operands()) {
35140     if (MO.isReg() && MO.isUse())
35141       for (unsigned &Reg : AvailableRegs)
35142         if (Reg == MO.getReg())
35143           Reg = 0;
35144   }
35145 
35146   // Choose the first remaining non-zero available register.
35147   unsigned AvailableReg = 0;
35148   for (unsigned MaybeReg : AvailableRegs) {
35149     if (MaybeReg) {
35150       AvailableReg = MaybeReg;
35151       break;
35152     }
35153   }
35154   if (!AvailableReg)
35155     report_fatal_error("calling convention incompatible with retpoline, no "
35156                        "available registers");
35157 
35158   const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
35159 
35160   BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), AvailableReg)
35161       .addReg(CalleeVReg);
35162   MI.getOperand(0).ChangeToES(Symbol);
35163   MI.setDesc(TII->get(Opc));
35164   MachineInstrBuilder(*BB->getParent(), &MI)
35165       .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
35166   return BB;
35167 }
35168 
35169 /// SetJmp implies a future control-flow change upon calling the corresponding
35170 /// LongJmp.
35171 /// Instead of using the 'return' instruction, the long jump fixes the stack and
35172 /// performs an indirect branch. To do so it uses the registers that were stored
35173 /// in the jump buffer (when SetJmp was called).
35174 /// If the shadow stack is enabled, we need to fix it as well, because some
35175 /// return addresses will be skipped.
35176 /// The function will save the SSP for future fixing in the function
35177 /// emitLongJmpShadowStackFix.
35178 /// \sa emitLongJmpShadowStackFix
35179 /// \param [in] MI The temporary Machine Instruction for the builtin.
35180 /// \param [in] MBB The Machine Basic Block that will be modified.
35181 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
35182                                                  MachineBasicBlock *MBB) const {
35183   const MIMetadata MIMD(MI);
35184   MachineFunction *MF = MBB->getParent();
35185   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35186   MachineRegisterInfo &MRI = MF->getRegInfo();
35187   MachineInstrBuilder MIB;
35188 
35189   // Memory Reference.
35190   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35191                                            MI.memoperands_end());
35192 
35193   // Initialize a register with zero.
35194   MVT PVT = getPointerTy(MF->getDataLayout());
35195   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35196   Register ZReg = MRI.createVirtualRegister(PtrRC);
35197   unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
35198   BuildMI(*MBB, MI, MIMD, TII->get(XorRROpc))
35199       .addDef(ZReg)
35200       .addReg(ZReg, RegState::Undef)
35201       .addReg(ZReg, RegState::Undef);
35202 
35203   // Read the current SSP Register value to the zeroed register.
35204   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35205   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35206   BuildMI(*MBB, MI, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35207 
35208   // Write the SSP register value to slot 3 of the input memory buffer.
35209   unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35210   MIB = BuildMI(*MBB, MI, MIMD, TII->get(PtrStoreOpc));
35211   const int64_t SSPOffset = 3 * PVT.getStoreSize();
35212   const unsigned MemOpndSlot = 1;
35213   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35214     if (i == X86::AddrDisp)
35215       MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
35216     else
35217       MIB.add(MI.getOperand(MemOpndSlot + i));
35218   }
35219   MIB.addReg(SSPCopyReg);
35220   MIB.setMemRefs(MMOs);
35221 }
35222 
35223 MachineBasicBlock *
35224 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
35225                                     MachineBasicBlock *MBB) const {
35226   const MIMetadata MIMD(MI);
35227   MachineFunction *MF = MBB->getParent();
35228   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35229   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35230   MachineRegisterInfo &MRI = MF->getRegInfo();
35231 
35232   const BasicBlock *BB = MBB->getBasicBlock();
35233   MachineFunction::iterator I = ++MBB->getIterator();
35234 
35235   // Memory Reference
35236   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35237                                            MI.memoperands_end());
35238 
35239   unsigned DstReg;
35240   unsigned MemOpndSlot = 0;
35241 
35242   unsigned CurOp = 0;
35243 
35244   DstReg = MI.getOperand(CurOp++).getReg();
35245   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35246   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
35247   (void)TRI;
35248   Register mainDstReg = MRI.createVirtualRegister(RC);
35249   Register restoreDstReg = MRI.createVirtualRegister(RC);
35250 
35251   MemOpndSlot = CurOp;
35252 
35253   MVT PVT = getPointerTy(MF->getDataLayout());
35254   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35255          "Invalid Pointer Size!");
35256 
35257   // For v = setjmp(buf), we generate
35258   //
35259   // thisMBB:
35260   //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
35261   //  SjLjSetup restoreMBB
35262   //
35263   // mainMBB:
35264   //  v_main = 0
35265   //
35266   // sinkMBB:
35267   //  v = phi(main, restore)
35268   //
35269   // restoreMBB:
35270   //  if the base pointer is being used, load it from the frame
35271   //  v_restore = 1
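  //
  // Note that slot 1 of the jump buffer (LabelOffset = 1 * PtrSize) receives
  // the address of restoreMBB below; with "cf-protection-return", slot 3
  // additionally receives the saved SSP (see emitSetJmpShadowStackFix).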
35272 
35273   MachineBasicBlock *thisMBB = MBB;
35274   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35275   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35276   MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
35277   MF->insert(I, mainMBB);
35278   MF->insert(I, sinkMBB);
35279   MF->push_back(restoreMBB);
35280   restoreMBB->setMachineBlockAddressTaken();
35281 
35282   MachineInstrBuilder MIB;
35283 
35284   // Transfer the remainder of BB and its successor edges to sinkMBB.
35285   sinkMBB->splice(sinkMBB->begin(), MBB,
35286                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35287   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35288 
35289   // thisMBB:
35290   unsigned PtrStoreOpc = 0;
35291   unsigned LabelReg = 0;
35292   const int64_t LabelOffset = 1 * PVT.getStoreSize();
35293   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35294                      !isPositionIndependent();
35295 
35296   // Prepare IP either in reg or imm.
35297   if (!UseImmLabel) {
35298     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35299     const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35300     LabelReg = MRI.createVirtualRegister(PtrRC);
35301     if (Subtarget.is64Bit()) {
35302       MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA64r), LabelReg)
35303               .addReg(X86::RIP)
35304               .addImm(0)
35305               .addReg(0)
35306               .addMBB(restoreMBB)
35307               .addReg(0);
35308     } else {
35309       const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
35310       MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA32r), LabelReg)
35311               .addReg(XII->getGlobalBaseReg(MF))
35312               .addImm(0)
35313               .addReg(0)
35314               .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
35315               .addReg(0);
35316     }
35317   } else
35318     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35319   // Store IP
35320   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrStoreOpc));
35321   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35322     if (i == X86::AddrDisp)
35323       MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
35324     else
35325       MIB.add(MI.getOperand(MemOpndSlot + i));
35326   }
35327   if (!UseImmLabel)
35328     MIB.addReg(LabelReg);
35329   else
35330     MIB.addMBB(restoreMBB);
35331   MIB.setMemRefs(MMOs);
35332 
35333   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35334     emitSetJmpShadowStackFix(MI, thisMBB);
35335   }
35336 
35337   // Setup
35338   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::EH_SjLj_Setup))
35339           .addMBB(restoreMBB);
35340 
35341   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35342   MIB.addRegMask(RegInfo->getNoPreservedMask());
35343   thisMBB->addSuccessor(mainMBB);
35344   thisMBB->addSuccessor(restoreMBB);
35345 
35346   // mainMBB:
35347   //  EAX = 0
35348   BuildMI(mainMBB, MIMD, TII->get(X86::MOV32r0), mainDstReg);
35349   mainMBB->addSuccessor(sinkMBB);
35350 
35351   // sinkMBB:
35352   BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
35353       .addReg(mainDstReg)
35354       .addMBB(mainMBB)
35355       .addReg(restoreDstReg)
35356       .addMBB(restoreMBB);
35357 
35358   // restoreMBB:
35359   if (RegInfo->hasBasePointer(*MF)) {
35360     const bool Uses64BitFramePtr =
35361         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35362     X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
35363     X86FI->setRestoreBasePointer(MF);
35364     Register FramePtr = RegInfo->getFrameRegister(*MF);
35365     Register BasePtr = RegInfo->getBaseRegister();
35366     unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
35367     addRegOffset(BuildMI(restoreMBB, MIMD, TII->get(Opm), BasePtr),
35368                  FramePtr, true, X86FI->getRestoreBasePointerOffset())
35369       .setMIFlag(MachineInstr::FrameSetup);
35370   }
35371   BuildMI(restoreMBB, MIMD, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
35372   BuildMI(restoreMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35373   restoreMBB->addSuccessor(sinkMBB);
35374 
35375   MI.eraseFromParent();
35376   return sinkMBB;
35377 }
35378 
35379 /// Fix the shadow stack using the previously saved SSP pointer.
35380 /// \sa emitSetJmpShadowStackFix
35381 /// \param [in] MI The temporary Machine Instruction for the builtin.
35382 /// \param [in] MBB The Machine Basic Block that will be modified.
35383 /// \return The sink MBB that will perform the future indirect branch.
35384 MachineBasicBlock *
35385 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
35386                                              MachineBasicBlock *MBB) const {
35387   const MIMetadata MIMD(MI);
35388   MachineFunction *MF = MBB->getParent();
35389   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35390   MachineRegisterInfo &MRI = MF->getRegInfo();
35391 
35392   // Memory Reference
35393   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35394                                            MI.memoperands_end());
35395 
35396   MVT PVT = getPointerTy(MF->getDataLayout());
35397   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35398 
35399   // checkSspMBB:
35400   //         xor vreg1, vreg1
35401   //         rdssp vreg1
35402   //         test vreg1, vreg1
35403   //         je sinkMBB   # Jump if Shadow Stack is not supported
35404   // fallMBB:
35405   //         mov buf+24/12(%rip), vreg2
35406   //         sub vreg1, vreg2
35407   //         jbe sinkMBB  # No need to fix the Shadow Stack
35408   // fixShadowMBB:
35409   //         shr 3/2, vreg2
35410   //         incssp vreg2  # fix the SSP according to the lower 8 bits
35411   //         shr 8, vreg2
35412   //         je sinkMBB
35413   // fixShadowLoopPrepareMBB:
35414   //         shl vreg2
35415   //         mov 128, vreg3
35416   // fixShadowLoopMBB:
35417   //         incssp vreg3
35418   //         dec vreg2
35419   //         jne fixShadowLoopMBB # Iterate until you finish fixing
35420   //                              # the Shadow Stack
35421   // sinkMBB:
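  //
  // INCSSP only consumes the low 8 bits of its operand, which is why the fix
  // is split: one INCSSP handles the low 8 bits of the entry count, and the
  // loop then advances 128 entries per iteration for the remainder.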
35422 
35423   MachineFunction::iterator I = ++MBB->getIterator();
35424   const BasicBlock *BB = MBB->getBasicBlock();
35425 
35426   MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
35427   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35428   MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
35429   MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
35430   MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
35431   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35432   MF->insert(I, checkSspMBB);
35433   MF->insert(I, fallMBB);
35434   MF->insert(I, fixShadowMBB);
35435   MF->insert(I, fixShadowLoopPrepareMBB);
35436   MF->insert(I, fixShadowLoopMBB);
35437   MF->insert(I, sinkMBB);
35438 
35439   // Transfer the remainder of BB and its successor edges to sinkMBB.
35440   sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
35441                   MBB->end());
35442   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35443 
35444   MBB->addSuccessor(checkSspMBB);
35445 
35446   // Initialize a register with zero.
35447   Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
35448   BuildMI(checkSspMBB, MIMD, TII->get(X86::MOV32r0), ZReg);
35449 
35450   if (PVT == MVT::i64) {
35451     Register TmpZReg = MRI.createVirtualRegister(PtrRC);
35452     BuildMI(checkSspMBB, MIMD, TII->get(X86::SUBREG_TO_REG), TmpZReg)
35453       .addImm(0)
35454       .addReg(ZReg)
35455       .addImm(X86::sub_32bit);
35456     ZReg = TmpZReg;
35457   }
35458 
35459   // Read the current SSP Register value to the zeroed register.
35460   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35461   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35462   BuildMI(checkSspMBB, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35463 
35464   // Check whether the value read from the SSP register is zero and, if so,
35465   // jump directly to the sink.
35466   unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
35467   BuildMI(checkSspMBB, MIMD, TII->get(TestRROpc))
35468       .addReg(SSPCopyReg)
35469       .addReg(SSPCopyReg);
35470   BuildMI(checkSspMBB, MIMD, TII->get(X86::JCC_1))
35471       .addMBB(sinkMBB)
35472       .addImm(X86::COND_E);
35473   checkSspMBB->addSuccessor(sinkMBB);
35474   checkSspMBB->addSuccessor(fallMBB);
35475 
35476   // Reload the previously saved SSP register value.
35477   Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
35478   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
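        // The saved SSP lives in the fourth pointer-sized slot of the buffer,
        // after the frame pointer, the resume label, and the stack pointer
        // (compare the LabelOffset/SPOffset computations in emitEHSjLjLongJmp).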
35479   const int64_t SSPOffset = 3 * PVT.getStoreSize();
35480   MachineInstrBuilder MIB =
35481       BuildMI(fallMBB, MIMD, TII->get(PtrLoadOpc), PrevSSPReg);
35482   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35483     const MachineOperand &MO = MI.getOperand(i);
35484     if (i == X86::AddrDisp)
35485       MIB.addDisp(MO, SSPOffset);
35486     else if (MO.isReg()) // Don't add the whole operand, we don't want to
35487                          // preserve kill flags.
35488       MIB.addReg(MO.getReg());
35489     else
35490       MIB.add(MO);
35491   }
35492   MIB.setMemRefs(MMOs);
35493 
35494   // Subtract the current SSP from the previous SSP.
35495   Register SspSubReg = MRI.createVirtualRegister(PtrRC);
35496   unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
35497   BuildMI(fallMBB, MIMD, TII->get(SubRROpc), SspSubReg)
35498       .addReg(PrevSSPReg)
35499       .addReg(SSPCopyReg);
35500 
35501   // Jump to sink in case PrevSSPReg <= SSPCopyReg.
35502   BuildMI(fallMBB, MIMD, TII->get(X86::JCC_1))
35503       .addMBB(sinkMBB)
35504       .addImm(X86::COND_BE);
35505   fallMBB->addSuccessor(sinkMBB);
35506   fallMBB->addSuccessor(fixShadowMBB);
35507 
35508   // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
35509   unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
35510   unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
35511   Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
35512   BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspFirstShrReg)
35513       .addReg(SspSubReg)
35514       .addImm(Offset);
35515 
35516   // Increase the SSP using only the lower 8 bits of the delta.
35517   unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
35518   BuildMI(fixShadowMBB, MIMD, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
35519 
35520   // Reset the lower 8 bits.
35521   Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
35522   BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspSecondShrReg)
35523       .addReg(SspFirstShrReg)
35524       .addImm(8);
35525 
35526   // Jump if the result of the shift is zero.
35527   BuildMI(fixShadowMBB, MIMD, TII->get(X86::JCC_1))
35528       .addMBB(sinkMBB)
35529       .addImm(X86::COND_E);
35530   fixShadowMBB->addSuccessor(sinkMBB);
35531   fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
35532 
35533   // Do a single shift left.
35534   unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64ri : X86::SHL32ri;
35535   Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
35536   BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(ShlR1Opc), SspAfterShlReg)
35537       .addReg(SspSecondShrReg)
35538       .addImm(1);
35539 
35540   // Save the value 128 to a register (will be used next with incssp).
35541   Register Value128InReg = MRI.createVirtualRegister(PtrRC);
35542   unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
35543   BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(MovRIOpc), Value128InReg)
35544       .addImm(128);
35545   fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
35546 
35547   // Since incssp only looks at the lower 8 bits, we might need to do several
35548   // iterations of incssp until we finish fixing the shadow stack.
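        // Illustrative example: for a delta of 0x345 shadow-stack entries, the
        // incssp above already advanced the SSP by 0x45 entries; the remaining
        // 0x300 are handled here as (0x345 >> 8) << 1 = 6 loop iterations of
        // incssp 128 each.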
35549   Register DecReg = MRI.createVirtualRegister(PtrRC);
35550   Register CounterReg = MRI.createVirtualRegister(PtrRC);
35551   BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::PHI), CounterReg)
35552       .addReg(SspAfterShlReg)
35553       .addMBB(fixShadowLoopPrepareMBB)
35554       .addReg(DecReg)
35555       .addMBB(fixShadowLoopMBB);
35556 
35557   // Every iteration we increase the SSP by 128.
35558   BuildMI(fixShadowLoopMBB, MIMD, TII->get(IncsspOpc)).addReg(Value128InReg);
35559 
35560   // Every iteration we decrement the counter by 1.
35561   unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
35562   BuildMI(fixShadowLoopMBB, MIMD, TII->get(DecROpc), DecReg).addReg(CounterReg);
35563 
35564   // Jump if the counter is not zero yet.
35565   BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::JCC_1))
35566       .addMBB(fixShadowLoopMBB)
35567       .addImm(X86::COND_NE);
35568   fixShadowLoopMBB->addSuccessor(sinkMBB);
35569   fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
35570 
35571   return sinkMBB;
35572 }
35573 
35574 MachineBasicBlock *
35575 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
35576                                      MachineBasicBlock *MBB) const {
35577   const MIMetadata MIMD(MI);
35578   MachineFunction *MF = MBB->getParent();
35579   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35580   MachineRegisterInfo &MRI = MF->getRegInfo();
35581 
35582   // Memory Reference
35583   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35584                                            MI.memoperands_end());
35585 
35586   MVT PVT = getPointerTy(MF->getDataLayout());
35587   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35588          "Invalid Pointer Size!");
35589 
35590   const TargetRegisterClass *RC =
35591     (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35592   Register Tmp = MRI.createVirtualRegister(RC);
35593   // Since FP is only updated here but NOT referenced, it's treated as GPR.
35594   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35595   Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
35596   Register SP = RegInfo->getStackRegister();
35597 
35598   MachineInstrBuilder MIB;
35599 
35600   const int64_t LabelOffset = 1 * PVT.getStoreSize();
35601   const int64_t SPOffset = 2 * PVT.getStoreSize();
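        // Layout of the buffer as used below: slot 0 holds the saved frame
        // pointer, slot 1 the resume label (IP), and slot 2 the saved stack
        // pointer.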
35602 
35603   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35604   unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
35605 
35606   MachineBasicBlock *thisMBB = MBB;
35607 
35608   // When CET and the shadow stack are enabled, we need to fix the shadow stack.
35609   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35610     thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
35611   }
35612 
35613   // Reload FP
35614   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), FP);
35615   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35616     const MachineOperand &MO = MI.getOperand(i);
35617     if (MO.isReg()) // Don't add the whole operand, we don't want to
35618                     // preserve kill flags.
35619       MIB.addReg(MO.getReg());
35620     else
35621       MIB.add(MO);
35622   }
35623   MIB.setMemRefs(MMOs);
35624 
35625   // Reload IP
35626   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), Tmp);
35627   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35628     const MachineOperand &MO = MI.getOperand(i);
35629     if (i == X86::AddrDisp)
35630       MIB.addDisp(MO, LabelOffset);
35631     else if (MO.isReg()) // Don't add the whole operand, we don't want to
35632                          // preserve kill flags.
35633       MIB.addReg(MO.getReg());
35634     else
35635       MIB.add(MO);
35636   }
35637   MIB.setMemRefs(MMOs);
35638 
35639   // Reload SP
35640   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), SP);
35641   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35642     if (i == X86::AddrDisp)
35643       MIB.addDisp(MI.getOperand(i), SPOffset);
35644     else
35645       MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
35646                                  // the last instruction of the expansion.
35647   }
35648   MIB.setMemRefs(MMOs);
35649 
35650   // Jump
35651   BuildMI(*thisMBB, MI, MIMD, TII->get(IJmpOpc)).addReg(Tmp);
35652 
35653   MI.eraseFromParent();
35654   return thisMBB;
35655 }
35656 
35657 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
35658                                                MachineBasicBlock *MBB,
35659                                                MachineBasicBlock *DispatchBB,
35660                                                int FI) const {
35661   const MIMetadata MIMD(MI);
35662   MachineFunction *MF = MBB->getParent();
35663   MachineRegisterInfo *MRI = &MF->getRegInfo();
35664   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35665 
35666   MVT PVT = getPointerTy(MF->getDataLayout());
35667   assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
35668 
35669   unsigned Op = 0;
35670   unsigned VR = 0;
35671 
35672   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35673                      !isPositionIndependent();
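        // With the small code model and no PIC, the dispatch block's address
        // fits in an absolute 32-bit immediate and can be stored directly;
        // otherwise it is first materialized into a register with LEA.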
35674 
35675   if (UseImmLabel) {
35676     Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35677   } else {
35678     const TargetRegisterClass *TRC =
35679         (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35680     VR = MRI->createVirtualRegister(TRC);
35681     Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35682 
35683     if (Subtarget.is64Bit())
35684       BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA64r), VR)
35685           .addReg(X86::RIP)
35686           .addImm(1)
35687           .addReg(0)
35688           .addMBB(DispatchBB)
35689           .addReg(0);
35690     else
35691       BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA32r), VR)
35692           .addReg(0) /* TII->getGlobalBaseReg(MF) */
35693           .addImm(1)
35694           .addReg(0)
35695           .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
35696           .addReg(0);
35697   }
35698 
35699   MachineInstrBuilder MIB = BuildMI(*MBB, MI, MIMD, TII->get(Op));
35700   addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
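        // The hard-coded displacements (56 on 64-bit, 36 on 32-bit) select what
        // appears to be the jbuf return-address slot of the SjLj function
        // context; they must be kept in sync with the layout produced by
        // SjLjEHPrepare.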
35701   if (UseImmLabel)
35702     MIB.addMBB(DispatchBB);
35703   else
35704     MIB.addReg(VR);
35705 }
35706 
35707 MachineBasicBlock *
35708 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
35709                                          MachineBasicBlock *BB) const {
35710   const MIMetadata MIMD(MI);
35711   MachineFunction *MF = BB->getParent();
35712   MachineRegisterInfo *MRI = &MF->getRegInfo();
35713   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35714   int FI = MF->getFrameInfo().getFunctionContextIndex();
35715 
35716   // Get a mapping of the call site numbers to all of the landing pads they're
35717   // associated with.
35718   DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
35719   unsigned MaxCSNum = 0;
35720   for (auto &MBB : *MF) {
35721     if (!MBB.isEHPad())
35722       continue;
35723 
35724     MCSymbol *Sym = nullptr;
35725     for (const auto &MI : MBB) {
35726       if (MI.isDebugInstr())
35727         continue;
35728 
35729       assert(MI.isEHLabel() && "expected EH_LABEL");
35730       Sym = MI.getOperand(0).getMCSymbol();
35731       break;
35732     }
35733 
35734     if (!MF->hasCallSiteLandingPad(Sym))
35735       continue;
35736 
35737     for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
35738       CallSiteNumToLPad[CSI].push_back(&MBB);
35739       MaxCSNum = std::max(MaxCSNum, CSI);
35740     }
35741   }
35742 
35743   // Get an ordered list of the machine basic blocks for the jump table.
35744   std::vector<MachineBasicBlock *> LPadList;
35745   SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
35746   LPadList.reserve(CallSiteNumToLPad.size());
35747 
35748   for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
35749     for (auto &LP : CallSiteNumToLPad[CSI]) {
35750       LPadList.push_back(LP);
35751       InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
35752     }
35753   }
35754 
35755   assert(!LPadList.empty() &&
35756          "No landing pad destinations for the dispatch jump table!");
35757 
35758   // Create the MBBs for the dispatch code.
35759 
35760   // Shove the dispatch's address into the return slot in the function context.
35761   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
35762   DispatchBB->setIsEHPad(true);
35763 
35764   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
35765   BuildMI(TrapBB, MIMD, TII->get(X86::TRAP));
35766   DispatchBB->addSuccessor(TrapBB);
35767 
35768   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
35769   DispatchBB->addSuccessor(DispContBB);
35770 
35771   // Insert MBBs.
35772   MF->push_back(DispatchBB);
35773   MF->push_back(DispContBB);
35774   MF->push_back(TrapBB);
35775 
35776   // Insert code into the entry block that creates and registers the function
35777   // context.
35778   SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
35779 
35780   // Create the jump table and associated information
35781   unsigned JTE = getJumpTableEncoding();
35782   MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
35783   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
35784 
35785   const X86RegisterInfo &RI = TII->getRegisterInfo();
35786   // Add a register mask with no preserved registers.  This results in all
35787   // registers being marked as clobbered.
35788   if (RI.hasBasePointer(*MF)) {
35789     const bool FPIs64Bit =
35790         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35791     X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
35792     MFI->setRestoreBasePointer(MF);
35793 
35794     Register FP = RI.getFrameRegister(*MF);
35795     Register BP = RI.getBaseRegister();
35796     unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
35797     addRegOffset(BuildMI(DispatchBB, MIMD, TII->get(Op), BP), FP, true,
35798                  MFI->getRestoreBasePointerOffset())
35799         .addRegMask(RI.getNoPreservedMask());
35800   } else {
35801     BuildMI(DispatchBB, MIMD, TII->get(X86::NOOP))
35802         .addRegMask(RI.getNoPreservedMask());
35803   }
35804 
35805   // IReg is used as an index in a memory operand and therefore can't be SP
35806   Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
35807   addFrameReference(BuildMI(DispatchBB, MIMD, TII->get(X86::MOV32rm), IReg), FI,
35808                     Subtarget.is64Bit() ? 8 : 4);
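        // The displacement (8 on 64-bit, 4 on 32-bit) loads what appears to be
        // the call-site field of the SjLj function context, i.e. the index
        // recorded before the invoke; it likewise has to match SjLjEHPrepare's
        // layout.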
35809   BuildMI(DispatchBB, MIMD, TII->get(X86::CMP32ri))
35810       .addReg(IReg)
35811       .addImm(LPadList.size());
35812   BuildMI(DispatchBB, MIMD, TII->get(X86::JCC_1))
35813       .addMBB(TrapBB)
35814       .addImm(X86::COND_AE);
35815 
35816   if (Subtarget.is64Bit()) {
35817     Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35818     Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
35819 
35820     // leaq .LJTI0_0(%rip), BReg
35821     BuildMI(DispContBB, MIMD, TII->get(X86::LEA64r), BReg)
35822         .addReg(X86::RIP)
35823         .addImm(1)
35824         .addReg(0)
35825         .addJumpTableIndex(MJTI)
35826         .addReg(0);
35827     // movzx IReg64, IReg
35828     BuildMI(DispContBB, MIMD, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
35829         .addImm(0)
35830         .addReg(IReg)
35831         .addImm(X86::sub_32bit);
35832 
35833     switch (JTE) {
35834     case MachineJumpTableInfo::EK_BlockAddress:
35835       // jmpq *(BReg,IReg64,8)
35836       BuildMI(DispContBB, MIMD, TII->get(X86::JMP64m))
35837           .addReg(BReg)
35838           .addImm(8)
35839           .addReg(IReg64)
35840           .addImm(0)
35841           .addReg(0);
35842       break;
35843     case MachineJumpTableInfo::EK_LabelDifference32: {
35844       Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
35845       Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
35846       Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35847 
35848       // movl (BReg,IReg64,4), OReg
35849       BuildMI(DispContBB, MIMD, TII->get(X86::MOV32rm), OReg)
35850           .addReg(BReg)
35851           .addImm(4)
35852           .addReg(IReg64)
35853           .addImm(0)
35854           .addReg(0);
35855       // movsx OReg64, OReg
35856       BuildMI(DispContBB, MIMD, TII->get(X86::MOVSX64rr32), OReg64)
35857           .addReg(OReg);
35858       // addq BReg, OReg64, TReg
35859       BuildMI(DispContBB, MIMD, TII->get(X86::ADD64rr), TReg)
35860           .addReg(OReg64)
35861           .addReg(BReg);
35862       // jmpq *TReg
35863       BuildMI(DispContBB, MIMD, TII->get(X86::JMP64r)).addReg(TReg);
35864       break;
35865     }
35866     default:
35867       llvm_unreachable("Unexpected jump table encoding");
35868     }
35869   } else {
35870     // jmpl *.LJTI0_0(,IReg,4)
35871     BuildMI(DispContBB, MIMD, TII->get(X86::JMP32m))
35872         .addReg(0)
35873         .addImm(4)
35874         .addReg(IReg)
35875         .addJumpTableIndex(MJTI)
35876         .addReg(0);
35877   }
35878 
35879   // Add the jump table entries as successors to the MBB.
35880   SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
35881   for (auto &LP : LPadList)
35882     if (SeenMBBs.insert(LP).second)
35883       DispContBB->addSuccessor(LP);
35884 
35885   // N.B. the order the invoke BBs are processed in doesn't matter here.
35886   SmallVector<MachineBasicBlock *, 64> MBBLPads;
35887   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
35888   for (MachineBasicBlock *MBB : InvokeBBs) {
35889     // Remove the landing pad successor from the invoke block and replace it
35890     // with the new dispatch block.
35891     // Keep a copy of Successors since it's modified inside the loop.
35892     SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
35893                                                    MBB->succ_rend());
35894     // FIXME: Avoid quadratic complexity.
35895     for (auto *MBBS : Successors) {
35896       if (MBBS->isEHPad()) {
35897         MBB->removeSuccessor(MBBS);
35898         MBBLPads.push_back(MBBS);
35899       }
35900     }
35901 
35902     MBB->addSuccessor(DispatchBB);
35903 
35904     // Find the invoke call and mark all of the callee-saved registers as
35905     // 'implicit defined' so that they're spilled. This prevents the compiler
35906     // from moving instructions to before the EH block, where they would never
35907     // be executed.
35908     for (auto &II : reverse(*MBB)) {
35909       if (!II.isCall())
35910         continue;
35911 
35912       DenseMap<unsigned, bool> DefRegs;
35913       for (auto &MOp : II.operands())
35914         if (MOp.isReg())
35915           DefRegs[MOp.getReg()] = true;
35916 
35917       MachineInstrBuilder MIB(*MF, &II);
35918       for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
35919         unsigned Reg = SavedRegs[RegIdx];
35920         if (!DefRegs[Reg])
35921           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
35922       }
35923 
35924       break;
35925     }
35926   }
35927 
35928   // Mark all former landing pads as non-landing pads.  The dispatch is the only
35929   // landing pad now.
35930   for (auto &LP : MBBLPads)
35931     LP->setIsEHPad(false);
35932 
35933   // The instruction is gone now.
35934   MI.eraseFromParent();
35935   return BB;
35936 }
35937 
35938 MachineBasicBlock *
35939 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
35940                                                MachineBasicBlock *BB) const {
35941   MachineFunction *MF = BB->getParent();
35942   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35943   const MIMetadata MIMD(MI);
35944 
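        // AMX pseudo instructions carry tile registers as immediate operands;
        // map an immediate tile index in the range 0..7 to the corresponding
        // physical TMM register.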
35945   auto TMMImmToTMMReg = [](unsigned Imm) {
35946     assert (Imm < 8 && "Illegal tmm index");
35947     return X86::TMM0 + Imm;
35948   };
35949   switch (MI.getOpcode()) {
35950   default: llvm_unreachable("Unexpected instr type to insert");
35951   case X86::TLS_addr32:
35952   case X86::TLS_addr64:
35953   case X86::TLS_addrX32:
35954   case X86::TLS_base_addr32:
35955   case X86::TLS_base_addr64:
35956   case X86::TLS_base_addrX32:
35957     return EmitLoweredTLSAddr(MI, BB);
35958   case X86::INDIRECT_THUNK_CALL32:
35959   case X86::INDIRECT_THUNK_CALL64:
35960   case X86::INDIRECT_THUNK_TCRETURN32:
35961   case X86::INDIRECT_THUNK_TCRETURN64:
35962     return EmitLoweredIndirectThunk(MI, BB);
35963   case X86::CATCHRET:
35964     return EmitLoweredCatchRet(MI, BB);
35965   case X86::SEG_ALLOCA_32:
35966   case X86::SEG_ALLOCA_64:
35967     return EmitLoweredSegAlloca(MI, BB);
35968   case X86::PROBED_ALLOCA_32:
35969   case X86::PROBED_ALLOCA_64:
35970     return EmitLoweredProbedAlloca(MI, BB);
35971   case X86::TLSCall_32:
35972   case X86::TLSCall_64:
35973     return EmitLoweredTLSCall(MI, BB);
35974   case X86::CMOV_FR16:
35975   case X86::CMOV_FR16X:
35976   case X86::CMOV_FR32:
35977   case X86::CMOV_FR32X:
35978   case X86::CMOV_FR64:
35979   case X86::CMOV_FR64X:
35980   case X86::CMOV_GR8:
35981   case X86::CMOV_GR16:
35982   case X86::CMOV_GR32:
35983   case X86::CMOV_RFP32:
35984   case X86::CMOV_RFP64:
35985   case X86::CMOV_RFP80:
35986   case X86::CMOV_VR64:
35987   case X86::CMOV_VR128:
35988   case X86::CMOV_VR128X:
35989   case X86::CMOV_VR256:
35990   case X86::CMOV_VR256X:
35991   case X86::CMOV_VR512:
35992   case X86::CMOV_VK1:
35993   case X86::CMOV_VK2:
35994   case X86::CMOV_VK4:
35995   case X86::CMOV_VK8:
35996   case X86::CMOV_VK16:
35997   case X86::CMOV_VK32:
35998   case X86::CMOV_VK64:
35999     return EmitLoweredSelect(MI, BB);
36000 
36001   case X86::FP80_ADDr:
36002   case X86::FP80_ADDm32: {
36003     // Change the floating point control register to use double extended
36004     // precision when performing the addition.
36005     int OrigCWFrameIdx =
36006         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36007     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36008                       OrigCWFrameIdx);
36009 
36010     // Load the old value of the control word...
36011     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36012     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36013                       OrigCWFrameIdx);
36014 
36015     // OR 0b11 into bits 8 and 9. 0b11 is the encoding for double extended
36016     // precision.
36017     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36018     BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36019         .addReg(OldCW, RegState::Kill)
36020         .addImm(0x300);
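          // For instance (illustrative values): a control word of 0x027F
          // (precision control 0b10, 53-bit) becomes 0x027F | 0x300 = 0x037F,
          // i.e. precision control 0b11, 64-bit double extended.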
36021 
36022     // Extract to 16 bits.
36023     Register NewCW16 =
36024         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36025     BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36026         .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36027 
36028     // Prepare memory for FLDCW.
36029     int NewCWFrameIdx =
36030         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36031     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36032                       NewCWFrameIdx)
36033         .addReg(NewCW16, RegState::Kill);
36034 
36035     // Reload the modified control word now...
36036     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36037                       NewCWFrameIdx);
36038 
36039     // Do the addition.
36040     if (MI.getOpcode() == X86::FP80_ADDr) {
36041       BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80))
36042           .add(MI.getOperand(0))
36043           .add(MI.getOperand(1))
36044           .add(MI.getOperand(2));
36045     } else {
36046       BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80m32))
36047           .add(MI.getOperand(0))
36048           .add(MI.getOperand(1))
36049           .add(MI.getOperand(2))
36050           .add(MI.getOperand(3))
36051           .add(MI.getOperand(4))
36052           .add(MI.getOperand(5))
36053           .add(MI.getOperand(6));
36054     }
36055 
36056     // Reload the original control word now.
36057     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36058                       OrigCWFrameIdx);
36059 
36060     MI.eraseFromParent(); // The pseudo instruction is gone now.
36061     return BB;
36062   }
36063 
36064   case X86::FP32_TO_INT16_IN_MEM:
36065   case X86::FP32_TO_INT32_IN_MEM:
36066   case X86::FP32_TO_INT64_IN_MEM:
36067   case X86::FP64_TO_INT16_IN_MEM:
36068   case X86::FP64_TO_INT32_IN_MEM:
36069   case X86::FP64_TO_INT64_IN_MEM:
36070   case X86::FP80_TO_INT16_IN_MEM:
36071   case X86::FP80_TO_INT32_IN_MEM:
36072   case X86::FP80_TO_INT64_IN_MEM: {
36073     // Change the floating point control register to use "round towards zero"
36074     // mode when truncating to an integer value.
36075     int OrigCWFrameIdx =
36076         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36077     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36078                       OrigCWFrameIdx);
36079 
36080     // Load the old value of the control word...
36081     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36082     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36083                       OrigCWFrameIdx);
36084 
36085     // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
36086     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36087     BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36088       .addReg(OldCW, RegState::Kill).addImm(0xC00);
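          // For instance (illustrative values): the default control word 0x037F
          // (round to nearest) becomes 0x037F | 0xC00 = 0x0F7F, i.e. rounding
          // control 0b11, round toward zero.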
36089 
36090     // Extract to 16 bits.
36091     Register NewCW16 =
36092         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36093     BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36094       .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36095 
36096     // Prepare memory for FLDCW.
36097     int NewCWFrameIdx =
36098         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36099     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36100                       NewCWFrameIdx)
36101       .addReg(NewCW16, RegState::Kill);
36102 
36103     // Reload the modified control word now...
36104     addFrameReference(BuildMI(*BB, MI, MIMD,
36105                               TII->get(X86::FLDCW16m)), NewCWFrameIdx);
36106 
36107     // Get the X86 opcode to use.
36108     unsigned Opc;
36109     switch (MI.getOpcode()) {
36110     default: llvm_unreachable("illegal opcode!");
36111     case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
36112     case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
36113     case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
36114     case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
36115     case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
36116     case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
36117     case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
36118     case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
36119     case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
36120     }
36121 
36122     X86AddressMode AM = getAddressFromInstr(&MI, 0);
36123     addFullAddress(BuildMI(*BB, MI, MIMD, TII->get(Opc)), AM)
36124         .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
36125 
36126     // Reload the original control word now.
36127     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36128                       OrigCWFrameIdx);
36129 
36130     MI.eraseFromParent(); // The pseudo instruction is gone now.
36131     return BB;
36132   }
36133 
36134   // xbegin
36135   case X86::XBEGIN:
36136     return emitXBegin(MI, BB, Subtarget.getInstrInfo());
36137 
36138   case X86::VAARG_64:
36139   case X86::VAARG_X32:
36140     return EmitVAARGWithCustomInserter(MI, BB);
36141 
36142   case X86::EH_SjLj_SetJmp32:
36143   case X86::EH_SjLj_SetJmp64:
36144     return emitEHSjLjSetJmp(MI, BB);
36145 
36146   case X86::EH_SjLj_LongJmp32:
36147   case X86::EH_SjLj_LongJmp64:
36148     return emitEHSjLjLongJmp(MI, BB);
36149 
36150   case X86::Int_eh_sjlj_setup_dispatch:
36151     return EmitSjLjDispatchBlock(MI, BB);
36152 
36153   case TargetOpcode::STATEPOINT:
36154     // As an implementation detail, STATEPOINT shares the STACKMAP format at
36155     // this point in the process.  We diverge later.
36156     return emitPatchPoint(MI, BB);
36157 
36158   case TargetOpcode::STACKMAP:
36159   case TargetOpcode::PATCHPOINT:
36160     return emitPatchPoint(MI, BB);
36161 
36162   case TargetOpcode::PATCHABLE_EVENT_CALL:
36163   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
36164     return BB;
36165 
36166   case X86::LCMPXCHG8B: {
36167     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36168     // In addition to the four E[ABCD] registers implied by its encoding,
36169     // CMPXCHG8B requires a memory operand. If the target is i686 and the
36170     // current function needs a base pointer - which is ESI on i686 - the
36171     // register allocator would not be able to allocate registers for an
36172     // address of the form X(%reg, %reg, Y): there would never be enough
36173     // unreserved registers during regalloc (without the base pointer the
36174     // only option would be X(%edi, %esi, Y)). We give the register
36175     // allocator a hand by precomputing the address in a new vreg using
36176     // LEA.
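          // Sketch of the rewrite performed below (operands are illustrative):
          //   cmpxchg8b X(%reg1,%reg2,Y)
          // becomes
          //   leal X(%reg1,%reg2,Y), %vreg
          //   cmpxchg8b (%vreg)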
36177 
36178     // If it is not i686 or there is no base pointer - nothing to do here.
36179     if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
36180       return BB;
36181 
36182     // Even though this code does not necessarily need the base pointer to
36183     // be ESI, we check for that. The reason: if this assert fails, something
36184     // has changed in the compiler's base pointer handling, and it most
36185     // probably has to be addressed here as well.
36186     assert(TRI->getBaseRegister() == X86::ESI &&
36187            "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
36188            "base pointer in mind");
36189 
36190     MachineRegisterInfo &MRI = MF->getRegInfo();
36191     MVT SPTy = getPointerTy(MF->getDataLayout());
36192     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
36193     Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
36194 
36195     X86AddressMode AM = getAddressFromInstr(&MI, 0);
36196     // Regalloc does not need any help when the memory operand of CMPXCHG8B
36197     // does not use an index register.
36198     if (AM.IndexReg == X86::NoRegister)
36199       return BB;
36200 
36201     // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
36202     // four operand definitions that are E[ABCD] registers. We skip them and
36203     // then insert the LEA.
36204     MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
36205     while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
36206                                    RMBBI->definesRegister(X86::EBX) ||
36207                                    RMBBI->definesRegister(X86::ECX) ||
36208                                    RMBBI->definesRegister(X86::EDX))) {
36209       ++RMBBI;
36210     }
36211     MachineBasicBlock::iterator MBBI(RMBBI);
36212     addFullAddress(
36213         BuildMI(*BB, *MBBI, MIMD, TII->get(X86::LEA32r), computedAddrVReg), AM);
36214 
36215     setDirectAddressInInstr(&MI, 0, computedAddrVReg);
36216 
36217     return BB;
36218   }
36219   case X86::LCMPXCHG16B_NO_RBX: {
36220     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36221     Register BasePtr = TRI->getBaseRegister();
36222     if (TRI->hasBasePointer(*MF) &&
36223         (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
36224       if (!BB->isLiveIn(BasePtr))
36225         BB->addLiveIn(BasePtr);
36226       // Save RBX into a virtual register.
36227       Register SaveRBX =
36228           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36229       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36230           .addReg(X86::RBX);
36231       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36232       MachineInstrBuilder MIB =
36233           BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
36234       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36235         MIB.add(MI.getOperand(Idx));
36236       MIB.add(MI.getOperand(X86::AddrNumOperands));
36237       MIB.addReg(SaveRBX);
36238     } else {
36239       // Simple case, just copy the virtual register to RBX.
36240       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::RBX)
36241           .add(MI.getOperand(X86::AddrNumOperands));
36242       MachineInstrBuilder MIB =
36243           BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B));
36244       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36245         MIB.add(MI.getOperand(Idx));
36246     }
36247     MI.eraseFromParent();
36248     return BB;
36249   }
36250   case X86::MWAITX: {
36251     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36252     Register BasePtr = TRI->getBaseRegister();
36253     bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
36254     // If there is no need to save the base pointer, we generate MWAITXrrr;
36255     // otherwise we generate the pseudo MWAITX_SAVE_RBX.
36256     if (!IsRBX || !TRI->hasBasePointer(*MF)) {
36257       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36258           .addReg(MI.getOperand(0).getReg());
36259       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36260           .addReg(MI.getOperand(1).getReg());
36261       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EBX)
36262           .addReg(MI.getOperand(2).getReg());
36263       BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITXrrr));
36264       MI.eraseFromParent();
36265     } else {
36266       if (!BB->isLiveIn(BasePtr)) {
36267         BB->addLiveIn(BasePtr);
36268       }
36269       // Parameters can be copied into ECX and EAX but not EBX yet.
36270       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36271           .addReg(MI.getOperand(0).getReg());
36272       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36273           .addReg(MI.getOperand(1).getReg());
36274       assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
36275       // Save RBX into a virtual register.
36276       Register SaveRBX =
36277           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36278       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36279           .addReg(X86::RBX);
36280       // Generate mwaitx pseudo.
36281       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36282       BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITX_SAVE_RBX))
36283           .addDef(Dst) // Destination tied in with SaveRBX.
36284           .addReg(MI.getOperand(2).getReg()) // input value of EBX.
36285           .addUse(SaveRBX);                  // Save of base pointer.
36286       MI.eraseFromParent();
36287     }
36288     return BB;
36289   }
36290   case TargetOpcode::PREALLOCATED_SETUP: {
36291     assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
36292     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36293     MFI->setHasPreallocatedCall(true);
36294     int64_t PreallocatedId = MI.getOperand(0).getImm();
36295     size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
36296     assert(StackAdjustment != 0 && "0 stack adjustment");
36297     LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
36298                       << StackAdjustment << "\n");
36299     BuildMI(*BB, MI, MIMD, TII->get(X86::SUB32ri), X86::ESP)
36300         .addReg(X86::ESP)
36301         .addImm(StackAdjustment);
36302     MI.eraseFromParent();
36303     return BB;
36304   }
36305   case TargetOpcode::PREALLOCATED_ARG: {
36306     assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
36307     int64_t PreallocatedId = MI.getOperand(1).getImm();
36308     int64_t ArgIdx = MI.getOperand(2).getImm();
36309     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36310     size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
36311     LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
36312                       << ", arg offset " << ArgOffset << "\n");
36313     // stack pointer + offset
36314     addRegOffset(BuildMI(*BB, MI, MIMD, TII->get(X86::LEA32r),
36315                          MI.getOperand(0).getReg()),
36316                  X86::ESP, false, ArgOffset);
36317     MI.eraseFromParent();
36318     return BB;
36319   }
36320   case X86::PTDPBSSD:
36321   case X86::PTDPBSUD:
36322   case X86::PTDPBUSD:
36323   case X86::PTDPBUUD:
36324   case X86::PTDPBF16PS:
36325   case X86::PTDPFP16PS: {
36326     unsigned Opc;
36327     switch (MI.getOpcode()) {
36328     default: llvm_unreachable("illegal opcode!");
36329     case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
36330     case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
36331     case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
36332     case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
36333     case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
36334     case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
36335     }
36336 
36337     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36338     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36339     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36340     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36341     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
36342 
36343     MI.eraseFromParent(); // The pseudo is gone now.
36344     return BB;
36345   }
36346   case X86::PTILEZERO: {
36347     unsigned Imm = MI.getOperand(0).getImm();
36348     BuildMI(*BB, MI, MIMD, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
36349     MI.eraseFromParent(); // The pseudo is gone now.
36350     return BB;
36351   }
36352   case X86::PTILELOADD:
36353   case X86::PTILELOADDT1:
36354   case X86::PTILESTORED: {
36355     unsigned Opc;
36356     switch (MI.getOpcode()) {
36357     default: llvm_unreachable("illegal opcode!");
36358     case X86::PTILELOADD:   Opc = X86::TILELOADD;   break;
36359     case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
36360     case X86::PTILESTORED:  Opc = X86::TILESTORED;  break;
36361     }
36362 
36363     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36364     unsigned CurOp = 0;
36365     if (Opc != X86::TILESTORED)
36366       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36367                  RegState::Define);
36368 
36369     MIB.add(MI.getOperand(CurOp++)); // base
36370     MIB.add(MI.getOperand(CurOp++)); // scale
36371     MIB.add(MI.getOperand(CurOp++)); // index -- stride
36372     MIB.add(MI.getOperand(CurOp++)); // displacement
36373     MIB.add(MI.getOperand(CurOp++)); // segment
36374 
36375     if (Opc == X86::TILESTORED)
36376       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36377                  RegState::Undef);
36378 
36379     MI.eraseFromParent(); // The pseudo is gone now.
36380     return BB;
36381   }
36382   case X86::PTCMMIMFP16PS:
36383   case X86::PTCMMRLFP16PS: {
36384     const MIMetadata MIMD(MI);
36385     unsigned Opc;
36386     switch (MI.getOpcode()) {
36387     default: llvm_unreachable("Unexpected instruction!");
36388     case X86::PTCMMIMFP16PS:     Opc = X86::TCMMIMFP16PS;     break;
36389     case X86::PTCMMRLFP16PS:     Opc = X86::TCMMRLFP16PS;     break;
36390     }
36391     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36392     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36393     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36394     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36395     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
36396     MI.eraseFromParent(); // The pseudo is gone now.
36397     return BB;
36398   }
36399   }
36400 }
36401 
36402 //===----------------------------------------------------------------------===//
36403 //                           X86 Optimization Hooks
36404 //===----------------------------------------------------------------------===//
36405 
36406 bool
36407 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
36408                                                 const APInt &DemandedBits,
36409                                                 const APInt &DemandedElts,
36410                                                 TargetLoweringOpt &TLO) const {
36411   EVT VT = Op.getValueType();
36412   unsigned Opcode = Op.getOpcode();
36413   unsigned EltSize = VT.getScalarSizeInBits();
36414 
36415   if (VT.isVector()) {
36416     // If the constant is all sign bits within the active bits, then we should
36417     // sign-extend it over the entire constant so that it can act as a boolean
36418     // constant vector.
36419     auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
36420       if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
36421         return false;
36422       for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
36423         if (!DemandedElts[i] || V.getOperand(i).isUndef())
36424           continue;
36425         const APInt &Val = V.getConstantOperandAPInt(i);
36426         if (Val.getBitWidth() > Val.getNumSignBits() &&
36427             Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
36428           return true;
36429       }
36430       return false;
36431     };
36432     // For vectors - if we have a constant, then try to sign extend.
36433     // TODO: Handle AND cases.
36434     unsigned ActiveBits = DemandedBits.getActiveBits();
36435     if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
36436         (Opcode == ISD::OR || Opcode == ISD::XOR || Opcode == X86ISD::ANDNP) &&
36437         NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
36438       EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
36439       EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
36440                                    VT.getVectorNumElements());
36441       SDValue NewC =
36442           TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
36443                           Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
36444       SDValue NewOp =
36445           TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
36446       return TLO.CombineTo(Op, NewOp);
36447     }
36448     return false;
36449   }
36450 
36451   // Only optimize Ands to prevent shrinking a constant that could be
36452   // matched by movzx.
36453   if (Opcode != ISD::AND)
36454     return false;
36455 
36456   // Make sure the RHS really is a constant.
36457   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
36458   if (!C)
36459     return false;
36460 
36461   const APInt &Mask = C->getAPIntValue();
36462 
36463   // Clear all non-demanded bits initially.
36464   APInt ShrunkMask = Mask & DemandedBits;
36465 
36466   // Find the width of the shrunk mask.
36467   unsigned Width = ShrunkMask.getActiveBits();
36468 
36469   // If the mask is all 0s there's nothing to do here.
36470   if (Width == 0)
36471     return false;
36472 
36473   // Find the next power of 2 width, rounding up to a byte.
36474   Width = llvm::bit_ceil(std::max(Width, 8U));
36475   // Clamp the width to the element size to handle illegal types.
36476   Width = std::min(Width, EltSize);
36477 
36478   // Calculate a possible zero extend mask for this constant.
36479   APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
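        // Illustrative example: for (and x, 0x3FF) with only the low 8 bits
        // demanded, ShrunkMask is 0xFF, Width becomes 8, and ZeroExtendMask is
        // 0xFF, so the constant is rewritten to 0xFF and the AND can later be
        // matched by movzx.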
36480 
36481   // If we aren't changing the mask, just return true to keep it and prevent
36482   // the caller from optimizing.
36483   if (ZeroExtendMask == Mask)
36484     return true;
36485 
36486   // Make sure the new mask can be represented by a combination of mask bits
36487   // and non-demanded bits.
36488   if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
36489     return false;
36490 
36491   // Replace the constant with the zero extend mask.
36492   SDLoc DL(Op);
36493   SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
36494   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
36495   return TLO.CombineTo(Op, NewOp);
36496 }
36497 
36498 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
36499                                                       KnownBits &Known,
36500                                                       const APInt &DemandedElts,
36501                                                       const SelectionDAG &DAG,
36502                                                       unsigned Depth) const {
36503   unsigned BitWidth = Known.getBitWidth();
36504   unsigned NumElts = DemandedElts.getBitWidth();
36505   unsigned Opc = Op.getOpcode();
36506   EVT VT = Op.getValueType();
36507   assert((Opc >= ISD::BUILTIN_OP_END ||
36508           Opc == ISD::INTRINSIC_WO_CHAIN ||
36509           Opc == ISD::INTRINSIC_W_CHAIN ||
36510           Opc == ISD::INTRINSIC_VOID) &&
36511          "Should use MaskedValueIsZero if you don't know whether Op"
36512          " is a target node!");
36513 
36514   Known.resetAll();
36515   switch (Opc) {
36516   default: break;
36517   case X86ISD::MUL_IMM: {
36518     KnownBits Known2;
36519     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36520     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36521     Known = KnownBits::mul(Known, Known2);
36522     break;
36523   }
36524   case X86ISD::SETCC:
36525     Known.Zero.setBitsFrom(1);
36526     break;
36527   case X86ISD::MOVMSK: {
36528     unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
36529     Known.Zero.setBitsFrom(NumLoBits);
36530     break;
36531   }
36532   case X86ISD::PEXTRB:
36533   case X86ISD::PEXTRW: {
36534     SDValue Src = Op.getOperand(0);
36535     EVT SrcVT = Src.getValueType();
36536     APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
36537                                             Op.getConstantOperandVal(1));
36538     Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
36539     Known = Known.anyextOrTrunc(BitWidth);
36540     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
36541     break;
36542   }
36543   case X86ISD::VSRAI:
36544   case X86ISD::VSHLI:
36545   case X86ISD::VSRLI: {
36546     unsigned ShAmt = Op.getConstantOperandVal(1);
36547     if (ShAmt >= VT.getScalarSizeInBits()) {
36548       // Out of range logical bit shifts are guaranteed to be zero.
36549       // Out of range arithmetic bit shifts splat the sign bit.
36550       if (Opc != X86ISD::VSRAI) {
36551         Known.setAllZero();
36552         break;
36553       }
36554 
36555       ShAmt = VT.getScalarSizeInBits() - 1;
36556     }
36557 
36558     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36559     if (Opc == X86ISD::VSHLI) {
36560       Known.Zero <<= ShAmt;
36561       Known.One <<= ShAmt;
36562       // Low bits are known zero.
36563       Known.Zero.setLowBits(ShAmt);
36564     } else if (Opc == X86ISD::VSRLI) {
36565       Known.Zero.lshrInPlace(ShAmt);
36566       Known.One.lshrInPlace(ShAmt);
36567       // High bits are known zero.
36568       Known.Zero.setHighBits(ShAmt);
36569     } else {
36570       Known.Zero.ashrInPlace(ShAmt);
36571       Known.One.ashrInPlace(ShAmt);
36572     }
36573     break;
36574   }
36575   case X86ISD::PACKUS: {
36576     // PACKUS is just a truncation if the upper half is zero.
36577     APInt DemandedLHS, DemandedRHS;
36578     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
36579 
36580     Known.One = APInt::getAllOnes(BitWidth * 2);
36581     Known.Zero = APInt::getAllOnes(BitWidth * 2);
36582 
36583     KnownBits Known2;
36584     if (!!DemandedLHS) {
36585       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
36586       Known = Known.intersectWith(Known2);
36587     }
36588     if (!!DemandedRHS) {
36589       Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
36590       Known = Known.intersectWith(Known2);
36591     }
36592 
36593     if (Known.countMinLeadingZeros() < BitWidth)
36594       Known.resetAll();
36595     Known = Known.trunc(BitWidth);
36596     break;
36597   }
36598   case X86ISD::VBROADCAST: {
36599     SDValue Src = Op.getOperand(0);
36600     if (!Src.getSimpleValueType().isVector()) {
36601       Known = DAG.computeKnownBits(Src, Depth + 1);
36602       return;
36603     }
36604     break;
36605   }
36606   case X86ISD::AND: {
36607     if (Op.getResNo() == 0) {
36608       KnownBits Known2;
36609       Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36610       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36611       Known &= Known2;
36612     }
36613     break;
36614   }
36615   case X86ISD::ANDNP: {
36616     KnownBits Known2;
36617     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36618     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36619 
36620     // ANDNP = (~X & Y);
36621     Known.One &= Known2.Zero;
36622     Known.Zero |= Known2.One;
36623     break;
36624   }
36625   case X86ISD::FOR: {
36626     KnownBits Known2;
36627     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36628     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36629 
36630     Known |= Known2;
36631     break;
36632   }
36633   case X86ISD::PSADBW: {
36634     assert(VT.getScalarType() == MVT::i64 &&
36635            Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
36636            "Unexpected PSADBW types");
36637 
36638     // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
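          // Each i64 lane is a sum of eight absolute byte differences, at most
          // 8 * 255 = 2040, which always fits in the low 16 bits.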
36639     Known.Zero.setBitsFrom(16);
36640     break;
36641   }
36642   case X86ISD::PCMPGT:
36643   case X86ISD::PCMPEQ: {
36644     KnownBits KnownLhs =
36645         DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36646     KnownBits KnownRhs =
36647         DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36648     std::optional<bool> Res = Opc == X86ISD::PCMPEQ
36649                                   ? KnownBits::eq(KnownLhs, KnownRhs)
36650                                   : KnownBits::sgt(KnownLhs, KnownRhs);
36651     if (Res) {
36652       if (*Res)
36653         Known.setAllOnes();
36654       else
36655         Known.setAllZero();
36656     }
36657     break;
36658   }
36659   case X86ISD::PMULUDQ: {
36660     KnownBits Known2;
36661     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36662     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36663 
36664     Known = Known.trunc(BitWidth / 2).zext(BitWidth);
36665     Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
36666     Known = KnownBits::mul(Known, Known2);
36667     break;
36668   }
36669   case X86ISD::CMOV: {
36670     Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
36671     // If we don't know any bits, early out.
36672     if (Known.isUnknown())
36673       break;
36674     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
36675 
36676     // Only known if known in both the LHS and RHS.
36677     Known = Known.intersectWith(Known2);
36678     break;
36679   }
36680   case X86ISD::BEXTR:
36681   case X86ISD::BEXTRI: {
36682     SDValue Op0 = Op.getOperand(0);
36683     SDValue Op1 = Op.getOperand(1);
36684 
36685     if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
36686       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
36687       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
36688 
36689       // If the length is 0, the result is 0.
36690       if (Length == 0) {
36691         Known.setAllZero();
36692         break;
36693       }
36694 
36695       if ((Shift + Length) <= BitWidth) {
36696         Known = DAG.computeKnownBits(Op0, Depth + 1);
36697         Known = Known.extractBits(Length, Shift);
36698         Known = Known.zextOrTrunc(BitWidth);
36699       }
36700     }
36701     break;
36702   }
36703   case X86ISD::PDEP: {
36704     KnownBits Known2;
36705     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36706     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36707     // Zeros are retained from the mask operand, but ones are not.
36708     Known.One.clearAllBits();
36709     // The result will have at least as many trailing zeros as the non-mask
36710     // operand since bits can only map to the same or higher bit position.
36711     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
36712     break;
36713   }
36714   case X86ISD::PEXT: {
36715     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36716     // The result has at least as many leading zeros as the mask has known zeros.
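          // For instance, with a 32-bit mask known to be 0x0000FF00, at most 8
          // bits of the source are extracted, so the top 24 bits of the result
          // are known zero.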
36717     unsigned Count = Known.Zero.popcount();
36718     Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
36719     Known.One.clearAllBits();
36720     break;
36721   }
36722   case X86ISD::VTRUNC:
36723   case X86ISD::VTRUNCS:
36724   case X86ISD::VTRUNCUS:
36725   case X86ISD::CVTSI2P:
36726   case X86ISD::CVTUI2P:
36727   case X86ISD::CVTP2SI:
36728   case X86ISD::CVTP2UI:
36729   case X86ISD::MCVTP2SI:
36730   case X86ISD::MCVTP2UI:
36731   case X86ISD::CVTTP2SI:
36732   case X86ISD::CVTTP2UI:
36733   case X86ISD::MCVTTP2SI:
36734   case X86ISD::MCVTTP2UI:
36735   case X86ISD::MCVTSI2P:
36736   case X86ISD::MCVTUI2P:
36737   case X86ISD::VFPROUND:
36738   case X86ISD::VMFPROUND:
36739   case X86ISD::CVTPS2PH:
36740   case X86ISD::MCVTPS2PH: {
36741     // Truncations/Conversions - upper elements are known zero.
36742     EVT SrcVT = Op.getOperand(0).getValueType();
36743     if (SrcVT.isVector()) {
36744       unsigned NumSrcElts = SrcVT.getVectorNumElements();
36745       if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36746         Known.setAllZero();
36747     }
36748     break;
36749   }
36750   case X86ISD::STRICT_CVTTP2SI:
36751   case X86ISD::STRICT_CVTTP2UI:
36752   case X86ISD::STRICT_CVTSI2P:
36753   case X86ISD::STRICT_CVTUI2P:
36754   case X86ISD::STRICT_VFPROUND:
36755   case X86ISD::STRICT_CVTPS2PH: {
36756     // Strict Conversions - upper elements are known zero.
36757     EVT SrcVT = Op.getOperand(1).getValueType();
36758     if (SrcVT.isVector()) {
36759       unsigned NumSrcElts = SrcVT.getVectorNumElements();
36760       if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36761         Known.setAllZero();
36762     }
36763     break;
36764   }
36765   case X86ISD::MOVQ2DQ: {
36766     // Move from MMX to XMM. Upper half of XMM should be 0.
36767     if (DemandedElts.countr_zero() >= (NumElts / 2))
36768       Known.setAllZero();
36769     break;
36770   }
36771   case X86ISD::VBROADCAST_LOAD: {
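          // If the broadcasted value resolves to constant bits, intersect the
          // known bits of every demanded element (giving up if any demanded
          // element is undef).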
36772     APInt UndefElts;
36773     SmallVector<APInt, 16> EltBits;
36774     if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
36775                                       /*AllowWholeUndefs*/ false,
36776                                       /*AllowPartialUndefs*/ false)) {
36777       Known.Zero.setAllBits();
36778       Known.One.setAllBits();
36779       for (unsigned I = 0; I != NumElts; ++I) {
36780         if (!DemandedElts[I])
36781           continue;
36782         if (UndefElts[I]) {
36783           Known.resetAll();
36784           break;
36785         }
36786         KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
36787         Known = Known.intersectWith(Known2);
36788       }
36789       return;
36790     }
36791     break;
36792   }
36793   }
36794 
36795   // Handle target shuffles.
36796   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36797   if (isTargetShuffle(Opc)) {
36798     SmallVector<int, 64> Mask;
36799     SmallVector<SDValue, 2> Ops;
36800     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36801       unsigned NumOps = Ops.size();
36802       unsigned NumElts = VT.getVectorNumElements();
36803       if (Mask.size() == NumElts) {
36804         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
36805         Known.Zero.setAllBits(); Known.One.setAllBits();
36806         for (unsigned i = 0; i != NumElts; ++i) {
36807           if (!DemandedElts[i])
36808             continue;
36809           int M = Mask[i];
36810           if (M == SM_SentinelUndef) {
36811             // For UNDEF elements, we don't know anything about the common state
36812             // of the shuffle result.
36813             Known.resetAll();
36814             break;
36815           }
36816           if (M == SM_SentinelZero) {
36817             Known.One.clearAllBits();
36818             continue;
36819           }
36820           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
36821                  "Shuffle index out of range");
36822 
36823           unsigned OpIdx = (unsigned)M / NumElts;
36824           unsigned EltIdx = (unsigned)M % NumElts;
36825           if (Ops[OpIdx].getValueType() != VT) {
36826             // TODO - handle target shuffle ops with different value types.
36827             Known.resetAll();
36828             break;
36829           }
36830           DemandedOps[OpIdx].setBit(EltIdx);
36831         }
36832         // Known bits are the values that are shared by every demanded element.
36833         for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
36834           if (!DemandedOps[i])
36835             continue;
36836           KnownBits Known2 =
36837               DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
36838           Known = Known.intersectWith(Known2);
36839         }
36840       }
36841     }
36842   }
36843 }
36844 
36845 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
36846     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
36847     unsigned Depth) const {
36848   EVT VT = Op.getValueType();
36849   unsigned VTBits = VT.getScalarSizeInBits();
36850   unsigned Opcode = Op.getOpcode();
36851   switch (Opcode) {
36852   case X86ISD::SETCC_CARRY:
36853     // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
36854     return VTBits;
36855 
36856   case X86ISD::VTRUNC: {
36857     SDValue Src = Op.getOperand(0);
36858     MVT SrcVT = Src.getSimpleValueType();
36859     unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
36860     assert(VTBits < NumSrcBits && "Illegal truncation input type");
36861     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
36862     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
36863     if (Tmp > (NumSrcBits - VTBits))
36864       return Tmp - (NumSrcBits - VTBits);
36865     return 1;
36866   }
36867 
36868   case X86ISD::PACKSS: {
36869     // PACKSS is just a truncation if the sign bits extend to the packed size.
36870     APInt DemandedLHS, DemandedRHS;
36871     getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
36872                         DemandedRHS);
36873 
36874     // Helper to detect PACKSSDW(BITCAST(PACKSSDW(X)),BITCAST(PACKSSDW(Y)))
36875     // patterns often used to compact vXi64 all-sign-bits values.
36876     auto NumSignBitsPACKSS = [&](SDValue V, const APInt &Elts) -> unsigned {
36877       SDValue BC = peekThroughBitcasts(V);
36878       if (BC.getOpcode() == X86ISD::PACKSS &&
36879           BC.getScalarValueSizeInBits() == 16 &&
36880           V.getScalarValueSizeInBits() == 32) {
36881         SDValue BC0 = peekThroughBitcasts(BC.getOperand(0));
36882         SDValue BC1 = peekThroughBitcasts(BC.getOperand(1));
36883         if (BC0.getScalarValueSizeInBits() == 64 &&
36884             BC1.getScalarValueSizeInBits() == 64 &&
36885             DAG.ComputeNumSignBits(BC0, Depth + 1) == 64 &&
36886             DAG.ComputeNumSignBits(BC1, Depth + 1) == 64)
36887           return 32;
36888       }
36889       return DAG.ComputeNumSignBits(V, Elts, Depth + 1);
36890     };
36891 
36892     unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
36893     unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
36894     if (!!DemandedLHS)
36895       Tmp0 = NumSignBitsPACKSS(Op.getOperand(0), DemandedLHS);
36896     if (!!DemandedRHS)
36897       Tmp1 = NumSignBitsPACKSS(Op.getOperand(1), DemandedRHS);
36898     unsigned Tmp = std::min(Tmp0, Tmp1);
36899     if (Tmp > (SrcBits - VTBits))
36900       return Tmp - (SrcBits - VTBits);
36901     return 1;
36902   }
36903 
36904   case X86ISD::VBROADCAST: {
36905     SDValue Src = Op.getOperand(0);
36906     if (!Src.getSimpleValueType().isVector())
36907       return DAG.ComputeNumSignBits(Src, Depth + 1);
36908     break;
36909   }
36910 
36911   case X86ISD::VSHLI: {
36912     SDValue Src = Op.getOperand(0);
36913     const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
36914     if (ShiftVal.uge(VTBits))
36915       return VTBits; // Shifted all bits out --> zero.
36916     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
36917     if (ShiftVal.uge(Tmp))
36918       return 1; // Shifted all sign bits out --> unknown.
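          // A left shift by N discards N of the known sign bits.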
36919     return Tmp - ShiftVal.getZExtValue();
36920   }
36921 
36922   case X86ISD::VSRAI: {
36923     SDValue Src = Op.getOperand(0);
36924     APInt ShiftVal = Op.getConstantOperandAPInt(1);
36925     if (ShiftVal.uge(VTBits - 1))
36926       return VTBits; // Sign splat.
36927     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
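          // An arithmetic shift right by N adds N copies of the sign bit,
          // capped at the bit width.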
36928     ShiftVal += Tmp;
36929     return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
36930   }
36931 
36932   case X86ISD::FSETCC:
36933     // cmpss/cmpsd return zero/all-bits result values in the bottom element.
36934     if (VT == MVT::f32 || VT == MVT::f64 ||
36935         ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
36936       return VTBits;
36937     break;
36938 
36939   case X86ISD::PCMPGT:
36940   case X86ISD::PCMPEQ:
36941   case X86ISD::CMPP:
36942   case X86ISD::VPCOM:
36943   case X86ISD::VPCOMU:
36944     // Vector compares return zero/all-bits result values.
36945     return VTBits;
36946 
36947   case X86ISD::ANDNP: {
36948     unsigned Tmp0 =
36949         DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
36950     if (Tmp0 == 1) return 1; // Early out.
36951     unsigned Tmp1 =
36952         DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
36953     return std::min(Tmp0, Tmp1);
36954   }
36955 
36956   case X86ISD::CMOV: {
36957     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
36958     if (Tmp0 == 1) return 1;  // Early out.
36959     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
36960     return std::min(Tmp0, Tmp1);
36961   }
36962   }
36963 
36964   // Handle target shuffles.
36965   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36966   if (isTargetShuffle(Opcode)) {
36967     SmallVector<int, 64> Mask;
36968     SmallVector<SDValue, 2> Ops;
36969     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36970       unsigned NumOps = Ops.size();
36971       unsigned NumElts = VT.getVectorNumElements();
36972       if (Mask.size() == NumElts) {
36973         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
36974         for (unsigned i = 0; i != NumElts; ++i) {
36975           if (!DemandedElts[i])
36976             continue;
36977           int M = Mask[i];
36978           if (M == SM_SentinelUndef) {
36979             // For UNDEF elements, we don't know anything about the common state
36980             // of the shuffle result.
36981             return 1;
36982           } else if (M == SM_SentinelZero) {
36983             // Zero = all sign bits.
36984             continue;
36985           }
36986           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
36987                  "Shuffle index out of range");
36988 
36989           unsigned OpIdx = (unsigned)M / NumElts;
36990           unsigned EltIdx = (unsigned)M % NumElts;
36991           if (Ops[OpIdx].getValueType() != VT) {
36992             // TODO - handle target shuffle ops with different value types.
36993             return 1;
36994           }
36995           DemandedOps[OpIdx].setBit(EltIdx);
36996         }
36997         unsigned Tmp0 = VTBits;
36998         for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
36999           if (!DemandedOps[i])
37000             continue;
37001           unsigned Tmp1 =
37002               DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
37003           Tmp0 = std::min(Tmp0, Tmp1);
37004         }
37005         return Tmp0;
37006       }
37007     }
37008   }
37009 
37010   // Fallback case.
37011   return 1;
37012 }
37013 
37014 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
37015   if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
37016     return N->getOperand(0);
37017   return N;
37018 }
37019 
37020 // Helper to look for a normal load that can be narrowed into a vzload with the
37021 // specified VT and memory VT. Returns SDValue() on failure.
37022 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
37023                                   SelectionDAG &DAG) {
37024   // Can't if the load is volatile or atomic.
37025   if (!LN->isSimple())
37026     return SDValue();
37027 
37028   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
37029   SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
37030   return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
37031                                  LN->getPointerInfo(), LN->getOriginalAlign(),
37032                                  LN->getMemOperand()->getFlags());
37033 }
37034 
37035 // Attempt to match a combined shuffle mask against supported unary shuffle
37036 // instructions.
37037 // TODO: Investigate sharing more of this with shuffle lowering.
37038 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37039                               bool AllowFloatDomain, bool AllowIntDomain,
37040                               SDValue V1, const SelectionDAG &DAG,
37041                               const X86Subtarget &Subtarget, unsigned &Shuffle,
37042                               MVT &SrcVT, MVT &DstVT) {
37043   unsigned NumMaskElts = Mask.size();
37044   unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
37045 
37046   // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
37047   if (Mask[0] == 0 &&
37048       (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
37049     if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
37050         (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
37051          isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
37052       Shuffle = X86ISD::VZEXT_MOVL;
37053       if (MaskEltSize == 16)
37054         SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37055       else
37056         SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37057       return true;
37058     }
37059   }
37060 
37061   // Match against an ANY/SIGN/ZERO_EXTEND_VECTOR_INREG instruction.
37062   // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
37063   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
37064                          (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
37065     unsigned MaxScale = 64 / MaskEltSize;
37066     bool UseSign = V1.getScalarValueSizeInBits() == MaskEltSize &&
37067                    DAG.ComputeNumSignBits(V1) == MaskEltSize;
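          // For each extension scale, check that every group of Scale mask
          // elements selects source element i (or undef) in its first slot,
          // with the remaining slots undef (any-extend), undef/zero
          // (zero-extend), or repeats of element i (sign-extend, valid only
          // when the source is known to be all sign bits).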
37068     for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
37069       bool MatchAny = true;
37070       bool MatchZero = true;
37071       bool MatchSign = UseSign;
37072       unsigned NumDstElts = NumMaskElts / Scale;
37073       for (unsigned i = 0;
37074            i != NumDstElts && (MatchAny || MatchSign || MatchZero); ++i) {
37075         if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
37076           MatchAny = MatchSign = MatchZero = false;
37077           break;
37078         }
37079         unsigned Pos = (i * Scale) + 1;
37080         unsigned Len = Scale - 1;
37081         MatchAny &= isUndefInRange(Mask, Pos, Len);
37082         MatchZero &= isUndefOrZeroInRange(Mask, Pos, Len);
37083         MatchSign &= isUndefOrEqualInRange(Mask, (int)i, Pos, Len);
37084       }
37085       if (MatchAny || MatchSign || MatchZero) {
37086         assert((MatchSign || MatchZero) &&
37087                "Failed to match sext/zext but matched aext?");
37088         unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
37089         MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
37090                                           : MVT::getIntegerVT(MaskEltSize);
37091         SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
37092 
37093         Shuffle = unsigned(
37094             MatchAny ? ISD::ANY_EXTEND
37095                      : (MatchSign ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND));
37096         if (SrcVT.getVectorNumElements() != NumDstElts)
37097           Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);
37098 
37099         DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
37100         DstVT = MVT::getVectorVT(DstVT, NumDstElts);
37101         return true;
37102       }
37103     }
37104   }
37105 
37106   // Match against a VZEXT_MOVL instruction; SSE1 only supports 32-bits (MOVSS).
37107   if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
37108        (MaskEltSize == 16 && Subtarget.hasFP16())) &&
37109       isUndefOrEqual(Mask[0], 0) &&
37110       isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
37111     Shuffle = X86ISD::VZEXT_MOVL;
37112     if (MaskEltSize == 16)
37113       SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37114     else
37115       SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37116     return true;
37117   }
37118 
37119   // Check if we have SSE3, which lets us use MOVDDUP etc. These
37120   // instructions are no slower than UNPCKLPD but have the option to
37121   // fold the input operand into even an unaligned memory load.
37122   if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
37123     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
37124       Shuffle = X86ISD::MOVDDUP;
37125       SrcVT = DstVT = MVT::v2f64;
37126       return true;
37127     }
37128     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37129       Shuffle = X86ISD::MOVSLDUP;
37130       SrcVT = DstVT = MVT::v4f32;
37131       return true;
37132     }
37133     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
37134       Shuffle = X86ISD::MOVSHDUP;
37135       SrcVT = DstVT = MVT::v4f32;
37136       return true;
37137     }
37138   }
37139 
37140   if (MaskVT.is256BitVector() && AllowFloatDomain) {
37141     assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
37142     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37143       Shuffle = X86ISD::MOVDDUP;
37144       SrcVT = DstVT = MVT::v4f64;
37145       return true;
37146     }
37147     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37148                                   V1)) {
37149       Shuffle = X86ISD::MOVSLDUP;
37150       SrcVT = DstVT = MVT::v8f32;
37151       return true;
37152     }
37153     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
37154                                   V1)) {
37155       Shuffle = X86ISD::MOVSHDUP;
37156       SrcVT = DstVT = MVT::v8f32;
37157       return true;
37158     }
37159   }
37160 
37161   if (MaskVT.is512BitVector() && AllowFloatDomain) {
37162     assert(Subtarget.hasAVX512() &&
37163            "AVX512 required for 512-bit vector shuffles");
37164     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37165                                   V1)) {
37166       Shuffle = X86ISD::MOVDDUP;
37167       SrcVT = DstVT = MVT::v8f64;
37168       return true;
37169     }
37170     if (isTargetShuffleEquivalent(
37171             MaskVT, Mask,
37172             {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
37173       Shuffle = X86ISD::MOVSLDUP;
37174       SrcVT = DstVT = MVT::v16f32;
37175       return true;
37176     }
37177     if (isTargetShuffleEquivalent(
37178             MaskVT, Mask,
37179             {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
37180       Shuffle = X86ISD::MOVSHDUP;
37181       SrcVT = DstVT = MVT::v16f32;
37182       return true;
37183     }
37184   }
37185 
37186   return false;
37187 }
37188 
37189 // Attempt to match a combined shuffle mask against supported unary immediate
37190 // permute instructions.
37191 // TODO: Investigate sharing more of this with shuffle lowering.
37192 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
37193                                      const APInt &Zeroable,
37194                                      bool AllowFloatDomain, bool AllowIntDomain,
37195                                      const SelectionDAG &DAG,
37196                                      const X86Subtarget &Subtarget,
37197                                      unsigned &Shuffle, MVT &ShuffleVT,
37198                                      unsigned &PermuteImm) {
37199   unsigned NumMaskElts = Mask.size();
37200   unsigned InputSizeInBits = MaskVT.getSizeInBits();
37201   unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
37202   MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
37203   bool ContainsZeros = isAnyZero(Mask);
37204 
37205   // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
37206   if (!ContainsZeros && MaskScalarSizeInBits == 64) {
37207     // Check for lane crossing permutes.
37208     if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
37209       // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
37210       if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
37211         Shuffle = X86ISD::VPERMI;
37212         ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
37213         PermuteImm = getV4X86ShuffleImm(Mask);
37214         return true;
37215       }
37216       if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
37217         SmallVector<int, 4> RepeatedMask;
37218         if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
37219           Shuffle = X86ISD::VPERMI;
37220           ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
37221           PermuteImm = getV4X86ShuffleImm(RepeatedMask);
37222           return true;
37223         }
37224       }
37225     } else if (AllowFloatDomain && Subtarget.hasAVX()) {
37226       // VPERMILPD can permute with a non-repeating shuffle.
37227       Shuffle = X86ISD::VPERMILPI;
37228       ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
37229       PermuteImm = 0;
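            // Each immediate bit i selects the low (0) or high (1) f64 element
            // within the 128-bit lane feeding result element i.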
37230       for (int i = 0, e = Mask.size(); i != e; ++i) {
37231         int M = Mask[i];
37232         if (M == SM_SentinelUndef)
37233           continue;
37234         assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
37235         PermuteImm |= (M & 1) << i;
37236       }
37237       return true;
37238     }
37239   }
37240 
37241   // We are checking for a shuffle match or a shift match. Loop twice so we can
37242   // control which we try to match first, depending on target preference.
37243   for (unsigned Order = 0; Order < 2; ++Order) {
37244     if (Subtarget.preferLowerShuffleAsShift() ? (Order == 1) : (Order == 0)) {
37245       // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
37246       // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
37247       // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
37248       if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
37249           !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
37250         SmallVector<int, 4> RepeatedMask;
37251         if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37252           // Narrow the repeated mask to create 32-bit element permutes.
37253           SmallVector<int, 4> WordMask = RepeatedMask;
37254           if (MaskScalarSizeInBits == 64)
37255             narrowShuffleMaskElts(2, RepeatedMask, WordMask);
37256 
37257           Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
37258           ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
37259           ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
37260           PermuteImm = getV4X86ShuffleImm(WordMask);
37261           return true;
37262         }
37263       }
37264 
37265       // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
37266       if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
37267           ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37268            (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37269            (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37270         SmallVector<int, 4> RepeatedMask;
37271         if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37272           ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
37273           ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
37274 
37275           // PSHUFLW: permute lower 4 elements only.
37276           if (isUndefOrInRange(LoMask, 0, 4) &&
37277               isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
37278             Shuffle = X86ISD::PSHUFLW;
37279             ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37280             PermuteImm = getV4X86ShuffleImm(LoMask);
37281             return true;
37282           }
37283 
37284           // PSHUFHW: permute upper 4 elements only.
37285           if (isUndefOrInRange(HiMask, 4, 8) &&
37286               isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
37287             // Offset the HiMask so that we can create the shuffle immediate.
37288             int OffsetHiMask[4];
37289             for (int i = 0; i != 4; ++i)
37290               OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
37291 
37292             Shuffle = X86ISD::PSHUFHW;
37293             ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37294             PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
37295             return true;
37296           }
37297         }
37298       }
37299     } else {
37300       // Attempt to match against bit rotates.
37301       if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
37302           ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
37303            Subtarget.hasAVX512())) {
37304         int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
37305                                                 Subtarget, Mask);
37306         if (0 < RotateAmt) {
37307           Shuffle = X86ISD::VROTLI;
37308           PermuteImm = (unsigned)RotateAmt;
37309           return true;
37310         }
37311       }
37312     }
37313     // Attempt to match against byte/bit shifts.
37314     if (AllowIntDomain &&
37315         ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37316          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37317          (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37318       int ShiftAmt =
37319           matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits, Mask, 0,
37320                               Zeroable, Subtarget);
37321       if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
37322                            32 <= ShuffleVT.getScalarSizeInBits())) {
37323         // Byte shifts can be slower so only match them on second attempt.
37324         // Byte shifts can be slower, so only match them on the second attempt.
37325             (Shuffle == X86ISD::VSHLDQ || Shuffle == X86ISD::VSRLDQ))
37326           continue;
37327 
37328         PermuteImm = (unsigned)ShiftAmt;
37329         return true;
37330       }
37332     }
37333   }
37334 
37335   return false;
37336 }
37337 
37338 // Attempt to match a combined unary shuffle mask against supported binary
37339 // shuffle instructions.
37340 // TODO: Investigate sharing more of this with shuffle lowering.
37341 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37342                                bool AllowFloatDomain, bool AllowIntDomain,
37343                                SDValue &V1, SDValue &V2, const SDLoc &DL,
37344                                SelectionDAG &DAG, const X86Subtarget &Subtarget,
37345                                unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
37346                                bool IsUnary) {
37347   unsigned NumMaskElts = Mask.size();
37348   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37349   unsigned SizeInBits = MaskVT.getSizeInBits();
37350 
37351   if (MaskVT.is128BitVector()) {
37352     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
37353         AllowFloatDomain) {
37354       V2 = V1;
37355       V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
37356       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
37357       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37358       return true;
37359     }
37360     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
37361         AllowFloatDomain) {
37362       V2 = V1;
37363       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
37364       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37365       return true;
37366     }
37367     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
37368         Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
37369       std::swap(V1, V2);
37370       Shuffle = X86ISD::MOVSD;
37371       SrcVT = DstVT = MVT::v2f64;
37372       return true;
37373     }
37374     if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
37375         (AllowFloatDomain || !Subtarget.hasSSE41())) {
37376       Shuffle = X86ISD::MOVSS;
37377       SrcVT = DstVT = MVT::v4f32;
37378       return true;
37379     }
37380     if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
37381                                   DAG) &&
37382         Subtarget.hasFP16()) {
37383       Shuffle = X86ISD::MOVSH;
37384       SrcVT = DstVT = MVT::v8f16;
37385       return true;
37386     }
37387   }
37388 
37389   // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
37390   if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
37391       ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
37392       ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
37393     if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
37394                              Subtarget)) {
37395       DstVT = MaskVT;
37396       return true;
37397     }
37398   }
37399   // TODO: Can we handle this inside matchShuffleWithPACK?
37400   if (MaskVT == MVT::v4i32 && Subtarget.hasSSE2() &&
37401       isTargetShuffleEquivalent(MaskVT, Mask, {0, 2, 4, 6}, DAG) &&
37402       V1.getScalarValueSizeInBits() == 64 &&
37403       V2.getScalarValueSizeInBits() == 64) {
37404     // Use (SSE41) PACKUSDW if only the lowest 16 bits can be nonzero.
37405     unsigned MinLZV1 = DAG.computeKnownBits(V1).countMinLeadingZeros();
37406     unsigned MinLZV2 = DAG.computeKnownBits(V2).countMinLeadingZeros();
37407     if (Subtarget.hasSSE41() && MinLZV1 >= 48 && MinLZV2 >= 48) {
37408       SrcVT = MVT::v4i32;
37409       DstVT = MVT::v8i16;
37410       Shuffle = X86ISD::PACKUS;
37411       return true;
37412     }
37413     // Use PACKUSWB if only the lowest 8 bits can be nonzero.
37414     if (MinLZV1 >= 56 && MinLZV2 >= 56) {
37415       SrcVT = MVT::v8i16;
37416       DstVT = MVT::v16i8;
37417       Shuffle = X86ISD::PACKUS;
37418       return true;
37419     }
37420     // Use PACKSSDW if the sign bits extend down to the lowest 16 bits.
37421     if (DAG.ComputeNumSignBits(V1) > 48 && DAG.ComputeNumSignBits(V2) > 48) {
37422       SrcVT = MVT::v4i32;
37423       DstVT = MVT::v8i16;
37424       Shuffle = X86ISD::PACKSS;
37425       return true;
37426     }
37427   }
37428 
37429   // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
37430   if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
37431       (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37432       (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
37433       (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37434       (MaskVT.is512BitVector() && Subtarget.hasAVX512() &&
37435        (32 <= EltSizeInBits || Subtarget.hasBWI()))) {
37436     if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
37437                               Subtarget)) {
37438       SrcVT = DstVT = MaskVT;
37439       if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
37440         SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
37441       return true;
37442     }
37443   }
37444 
37445   // Attempt to match against an OR if we're performing a blend shuffle and the
37446   // non-blended source element is zero in each case.
37447   // TODO: Handle cases where the V1/V2 sizes don't match SizeInBits.
37448   if (SizeInBits == V1.getValueSizeInBits() &&
37449       SizeInBits == V2.getValueSizeInBits() &&
37450       (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37451       (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
37452     bool IsBlend = true;
37453     unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
37454     unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
37455     unsigned Scale1 = NumV1Elts / NumMaskElts;
37456     unsigned Scale2 = NumV2Elts / NumMaskElts;
37457     APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
37458     APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
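          // For each mask element, record which source elements must be zero
          // for the blend to be expressible as an OR.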
37459     for (unsigned i = 0; i != NumMaskElts; ++i) {
37460       int M = Mask[i];
37461       if (M == SM_SentinelUndef)
37462         continue;
37463       if (M == SM_SentinelZero) {
37464         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37465         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37466         continue;
37467       }
37468       if (M == (int)i) {
37469         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37470         continue;
37471       }
37472       if (M == (int)(i + NumMaskElts)) {
37473         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37474         continue;
37475       }
37476       IsBlend = false;
37477       break;
37478     }
37479     if (IsBlend) {
37480       if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
37481           DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
37482         Shuffle = ISD::OR;
37483         SrcVT = DstVT = MaskVT.changeTypeToInteger();
37484         return true;
37485       }
37486       if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
37487         // FIXME: handle mismatched sizes?
37488         // TODO: investigate if `ISD::OR` handling in
37489         // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
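              // Summarize each element as known-all-zeros / known-all-ones by
              // querying the known bits with one demanded element at a time.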
37490         auto computeKnownBitsElementWise = [&DAG](SDValue V) {
37491           unsigned NumElts = V.getValueType().getVectorNumElements();
37492           KnownBits Known(NumElts);
37493           for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
37494             APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
37495             KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
37496             if (PeepholeKnown.isZero())
37497               Known.Zero.setBit(EltIdx);
37498             if (PeepholeKnown.isAllOnes())
37499               Known.One.setBit(EltIdx);
37500           }
37501           return Known;
37502         };
37503 
37504         KnownBits V1Known = computeKnownBitsElementWise(V1);
37505         KnownBits V2Known = computeKnownBitsElementWise(V2);
37506 
37507         for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
37508           int M = Mask[i];
37509           if (M == SM_SentinelUndef)
37510             continue;
37511           if (M == SM_SentinelZero) {
37512             IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
37513             continue;
37514           }
37515           if (M == (int)i) {
37516             IsBlend &= V2Known.Zero[i] || V1Known.One[i];
37517             continue;
37518           }
37519           if (M == (int)(i + NumMaskElts)) {
37520             IsBlend &= V1Known.Zero[i] || V2Known.One[i];
37521             continue;
37522           }
37523           llvm_unreachable("will not get here.");
37524         }
37525         if (IsBlend) {
37526           Shuffle = ISD::OR;
37527           SrcVT = DstVT = MaskVT.changeTypeToInteger();
37528           return true;
37529         }
37530       }
37531     }
37532   }
37533 
37534   return false;
37535 }
37536 
37537 static bool matchBinaryPermuteShuffle(
37538     MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
37539     bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
37540     const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
37541     unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
37542   unsigned NumMaskElts = Mask.size();
37543   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37544 
37545   // Attempt to match against VALIGND/VALIGNQ rotate.
37546   if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
37547       ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
37548        (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
37549        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37550     if (!isAnyZero(Mask)) {
37551       int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
37552       if (0 < Rotation) {
37553         Shuffle = X86ISD::VALIGN;
37554         if (EltSizeInBits == 64)
37555           ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
37556         else
37557           ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
37558         PermuteImm = Rotation;
37559         return true;
37560       }
37561     }
37562   }
37563 
37564   // Attempt to match against PALIGNR byte rotate.
37565   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
37566                          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37567                          (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37568     int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
37569     if (0 < ByteRotation) {
37570       Shuffle = X86ISD::PALIGNR;
37571       ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
37572       PermuteImm = ByteRotation;
37573       return true;
37574     }
37575   }
37576 
37577   // Attempt to combine to X86ISD::BLENDI.
37578   if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
37579                             (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
37580       (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
37581     uint64_t BlendMask = 0;
37582     bool ForceV1Zero = false, ForceV2Zero = false;
37583     SmallVector<int, 8> TargetMask(Mask);
37584     if (matchShuffleAsBlend(MaskVT, V1, V2, TargetMask, Zeroable, ForceV1Zero,
37585                             ForceV2Zero, BlendMask)) {
37586       if (MaskVT == MVT::v16i16) {
37587         // We can only use v16i16 PBLENDW if the lanes are repeated.
37588         SmallVector<int, 8> RepeatedMask;
37589         if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
37590                                         RepeatedMask)) {
37591           assert(RepeatedMask.size() == 8 &&
37592                  "Repeated mask size doesn't match!");
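                // Build the PBLENDW immediate: bit i selects V2 for element i
                // of each repeated 128-bit lane.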
37593           PermuteImm = 0;
37594           for (int i = 0; i < 8; ++i)
37595             if (RepeatedMask[i] >= 8)
37596               PermuteImm |= 1 << i;
37597           V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37598           V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37599           Shuffle = X86ISD::BLENDI;
37600           ShuffleVT = MaskVT;
37601           return true;
37602         }
37603       } else {
37604         V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37605         V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37606         PermuteImm = (unsigned)BlendMask;
37607         Shuffle = X86ISD::BLENDI;
37608         ShuffleVT = MaskVT;
37609         return true;
37610       }
37611     }
37612   }
37613 
37614   // Attempt to combine to INSERTPS, but only if it has elements that need to
37615   // be set to zero.
37616   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37617       MaskVT.is128BitVector() && isAnyZero(Mask) &&
37618       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37619     Shuffle = X86ISD::INSERTPS;
37620     ShuffleVT = MVT::v4f32;
37621     return true;
37622   }
37623 
37624   // Attempt to combine to SHUFPD.
37625   if (AllowFloatDomain && EltSizeInBits == 64 &&
37626       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37627        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37628        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37629     bool ForceV1Zero = false, ForceV2Zero = false;
37630     if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
37631                                PermuteImm, Mask, Zeroable)) {
37632       V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37633       V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37634       Shuffle = X86ISD::SHUFP;
37635       ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
37636       return true;
37637     }
37638   }
37639 
37640   // Attempt to combine to SHUFPS.
37641   if (AllowFloatDomain && EltSizeInBits == 32 &&
37642       ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
37643        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37644        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37645     SmallVector<int, 4> RepeatedMask;
37646     if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
37647       // Match each half of the repeated mask to determine if it's just
37648       // referencing one of the vectors, is zeroable, or is entirely undef.
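            // On a match, each call returns the operand to use for that half
            // (V1, V2, a zero vector, or undef) and sets S0/S1 to the chosen
            // element indices (-1 kept for undef).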
37649       auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
37650         int M0 = RepeatedMask[Offset];
37651         int M1 = RepeatedMask[Offset + 1];
37652 
37653         if (isUndefInRange(RepeatedMask, Offset, 2)) {
37654           return DAG.getUNDEF(MaskVT);
37655         } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
37656           S0 = (SM_SentinelUndef == M0 ? -1 : 0);
37657           S1 = (SM_SentinelUndef == M1 ? -1 : 1);
37658           return getZeroVector(MaskVT, Subtarget, DAG, DL);
37659         } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
37660           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37661           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37662           return V1;
37663         } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
37664           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37665           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37666           return V2;
37667         }
37668 
37669         return SDValue();
37670       };
37671 
37672       int ShufMask[4] = {-1, -1, -1, -1};
37673       SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
37674       SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
37675 
37676       if (Lo && Hi) {
37677         V1 = Lo;
37678         V2 = Hi;
37679         Shuffle = X86ISD::SHUFP;
37680         ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
37681         PermuteImm = getV4X86ShuffleImm(ShufMask);
37682         return true;
37683       }
37684     }
37685   }
37686 
37687   // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
37688   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37689       MaskVT.is128BitVector() &&
37690       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37691     Shuffle = X86ISD::INSERTPS;
37692     ShuffleVT = MVT::v4f32;
37693     return true;
37694   }
37695 
37696   return false;
37697 }
37698 
37699 static SDValue combineX86ShuffleChainWithExtract(
37700     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
37701     bool HasVariableMask, bool AllowVariableCrossLaneMask,
37702     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
37703     const X86Subtarget &Subtarget);
37704 
37705 /// Combine an arbitrary chain of shuffles into a single instruction if
37706 /// possible.
37707 ///
37708 /// This is the leaf of the recursive combine below. When we have found some
37709 /// chain of single-use x86 shuffle instructions and accumulated the combined
37710 /// shuffle mask represented by them, this will try to pattern match that mask
37711 /// into either a single instruction if there is a special purpose instruction
37712 /// for this operation, or into a PSHUFB instruction which is a fully general
37713 /// instruction but should only be used to replace chains over a certain depth.
37714 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
37715                                       ArrayRef<int> BaseMask, int Depth,
37716                                       bool HasVariableMask,
37717                                       bool AllowVariableCrossLaneMask,
37718                                       bool AllowVariablePerLaneMask,
37719                                       SelectionDAG &DAG,
37720                                       const X86Subtarget &Subtarget) {
37721   assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
37722   assert((Inputs.size() == 1 || Inputs.size() == 2) &&
37723          "Unexpected number of shuffle inputs!");
37724 
37725   SDLoc DL(Root);
37726   MVT RootVT = Root.getSimpleValueType();
37727   unsigned RootSizeInBits = RootVT.getSizeInBits();
37728   unsigned NumRootElts = RootVT.getVectorNumElements();
37729 
37730   // Canonicalize shuffle input op to the requested type.
37731   auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
37732     if (VT.getSizeInBits() > Op.getValueSizeInBits())
37733       Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
37734     else if (VT.getSizeInBits() < Op.getValueSizeInBits())
37735       Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
37736     return DAG.getBitcast(VT, Op);
37737   };
37738 
37739   // Find the inputs that enter the chain. Note that multiple uses are OK
37740   // here, we're not going to remove the operands we find.
37741   // here; we're not going to remove the operands we find.
37742   SDValue V1 = peekThroughBitcasts(Inputs[0]);
37743   SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
37744                              : peekThroughBitcasts(Inputs[1]));
37745 
37746   MVT VT1 = V1.getSimpleValueType();
37747   MVT VT2 = V2.getSimpleValueType();
37748   assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
37749          (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
37750 
37751   SDValue Res;
37752 
37753   unsigned NumBaseMaskElts = BaseMask.size();
37754   if (NumBaseMaskElts == 1) {
37755     assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
37756     return CanonicalizeShuffleInput(RootVT, V1);
37757   }
37758 
37759   bool OptForSize = DAG.shouldOptForSize();
37760   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
37761   bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
37762                      (RootVT.isFloatingPoint() && Depth >= 1) ||
37763                      (RootVT.is256BitVector() && !Subtarget.hasAVX2());
37764 
37765   // Don't combine if we are an AVX512/EVEX target and the mask element size
37766   // is different from the root element size - this would prevent writemasks
37767   // from being reused.
37768   bool IsMaskedShuffle = false;
37769   if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
37770     if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
37771         Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
37772       IsMaskedShuffle = true;
37773     }
37774   }
37775 
37776   // If we are shuffling a splat (and not introducing zeros) then we can just
37777   // use it directly. This works for smaller elements as well, since they already
37778   // repeat across each mask element.
37779   if (UnaryShuffle && !isAnyZero(BaseMask) &&
37780       V1.getValueSizeInBits() >= RootSizeInBits &&
37781       (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37782       DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
37783     return CanonicalizeShuffleInput(RootVT, V1);
37784   }
37785 
37786   SmallVector<int, 64> Mask(BaseMask);
37787 
37788   // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
37789   // etc. can be simplified.
37790   if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
37791     SmallVector<int> ScaledMask, IdentityMask;
37792     unsigned NumElts = VT1.getVectorNumElements();
37793     if (Mask.size() <= NumElts &&
37794         scaleShuffleElements(Mask, NumElts, ScaledMask)) {
37795       for (unsigned i = 0; i != NumElts; ++i)
37796         IdentityMask.push_back(i);
37797       if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
37798                                     V2))
37799         return CanonicalizeShuffleInput(RootVT, V1);
37800     }
37801   }
37802 
37803   // Handle 128/256-bit lane shuffles of 512-bit vectors.
37804   if (RootVT.is512BitVector() &&
37805       (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
37806     // If the upper subvectors are zeroable, then an extract+insert is more
37807     // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
37808     // to zero the upper subvectors.
37809     if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
37810       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37811         return SDValue(); // Nothing to do!
37812       assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
37813              "Unexpected lane shuffle");
37814       Res = CanonicalizeShuffleInput(RootVT, V1);
37815       unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
37816       bool UseZero = isAnyZero(Mask);
37817       Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
37818       return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
37819     }
37820 
37821     // Narrow shuffle mask to v4x128.
37822     SmallVector<int, 4> ScaledMask;
37823     assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
37824     narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
37825 
37826     // Try to lower to vshuf64x2/vshuf32x4.
37827     auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
37828                             ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
37829                             SelectionDAG &DAG) {
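            // SHUF128 takes its low two 128-bit result lanes from the first
            // source and its high two lanes from the second; each 2-bit
            // immediate field selects a lane within that source.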
37830       int PermMask[4] = {-1, -1, -1, -1};
37831       // Ensure elements came from the same Op.
37832       SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
37833       for (int i = 0; i < 4; ++i) {
37834         assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
37835         if (ScaledMask[i] < 0)
37836           continue;
37837 
37838         SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
37839         unsigned OpIndex = i / 2;
37840         if (Ops[OpIndex].isUndef())
37841           Ops[OpIndex] = Op;
37842         else if (Ops[OpIndex] != Op)
37843           return SDValue();
37844 
37845         PermMask[i] = ScaledMask[i] % 4;
37846       }
37847 
37848       return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
37849                          CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
37850                          CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
37851                          getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
37852     };
37853 
37854     // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
37855     // doesn't work because our mask is for 128 bits and we don't have an MVT
37856     // to match that.
37857     bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
37858                        isUndefOrInRange(ScaledMask[1], 0, 2) &&
37859                        isUndefOrInRange(ScaledMask[2], 2, 4) &&
37860                        isUndefOrInRange(ScaledMask[3], 2, 4) &&
37861                        (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
37862                         ScaledMask[0] == (ScaledMask[2] % 2)) &&
37863                        (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
37864                         ScaledMask[1] == (ScaledMask[3] % 2));
37865 
37866     if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
37867       if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37868         return SDValue(); // Nothing to do!
37869       MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
37870       if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
37871         return DAG.getBitcast(RootVT, V);
37872     }
37873   }
37874 
37875   // Handle 128-bit lane shuffles of 256-bit vectors.
37876   if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
37877     // If the upper half is zeroable, then an extract+insert is more optimal
37878     // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
37879     // zero the upper half.
37880     if (isUndefOrZero(Mask[1])) {
37881       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37882         return SDValue(); // Nothing to do!
37883       assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
37884       Res = CanonicalizeShuffleInput(RootVT, V1);
37885       Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
37886       return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
37887                             256);
37888     }
37889 
37890     // If we're inserting the low subvector, an insert-subvector 'concat'
37891     // pattern is quicker than VPERM2X128.
37892     // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
37893     if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
37894         !Subtarget.hasAVX2()) {
37895       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37896         return SDValue(); // Nothing to do!
37897       SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
37898       SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
37899       Hi = extractSubVector(Hi, 0, DAG, DL, 128);
37900       return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
37901     }
37902 
37903     if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
37904       return SDValue(); // Nothing to do!
37905 
37906     // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
37907     // we need to use the zeroing feature.
37908     // Prefer blends for sequential shuffles unless we are optimizing for size.
37909     if (UnaryShuffle &&
37910         !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
37911         (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
37912       unsigned PermMask = 0;
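            // VPERM2X128 immediate: bits [1:0] and [5:4] pick the source
            // 128-bit lane for the low and high result halves; bit 3 / bit 7
            // zeroes that half instead.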
37913       PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
37914       PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
37915       return DAG.getNode(
37916           X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
37917           DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
37918     }
37919 
37920     if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37921       return SDValue(); // Nothing to do!
37922 
37923     // TODO - handle AVX512VL cases with X86ISD::SHUF128.
37924     if (!UnaryShuffle && !IsMaskedShuffle) {
37925       assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
37926              "Unexpected shuffle sentinel value");
37927       // Prefer blends to X86ISD::VPERM2X128.
37928       if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
37929         unsigned PermMask = 0;
37930         PermMask |= ((Mask[0] & 3) << 0);
37931         PermMask |= ((Mask[1] & 3) << 4);
37932         SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
37933         SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
37934         return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
37935                           CanonicalizeShuffleInput(RootVT, LHS),
37936                           CanonicalizeShuffleInput(RootVT, RHS),
37937                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
37938       }
37939     }
37940   }
37941 
37942   // For masks that have been widened to 128-bit elements or more,
37943   // narrow back down to 64-bit elements.
37944   if (BaseMaskEltSizeInBits > 64) {
37945     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
37946     int MaskScale = BaseMaskEltSizeInBits / 64;
37947     SmallVector<int, 64> ScaledMask;
37948     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37949     Mask = std::move(ScaledMask);
37950   }
37951 
37952   // For masked shuffles, we're trying to match the root width for better
37953   // writemask folding, attempt to scale the mask.
37954   // TODO - variable shuffles might need this to be widened again.
37955   if (IsMaskedShuffle && NumRootElts > Mask.size()) {
37956     assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
37957     int MaskScale = NumRootElts / Mask.size();
37958     SmallVector<int, 64> ScaledMask;
37959     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37960     Mask = std::move(ScaledMask);
37961   }
37962 
37963   unsigned NumMaskElts = Mask.size();
37964   unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
37965 
37966   // Determine the effective mask value type.
37967   FloatDomain &= (32 <= MaskEltSizeInBits);
37968   MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
37969                            : MVT::getIntegerVT(MaskEltSizeInBits);
37970   MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
37971 
37972   // Only allow legal mask types.
37973   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
37974     return SDValue();
37975 
37976   // Attempt to match the mask against known shuffle patterns.
37977   MVT ShuffleSrcVT, ShuffleVT;
37978   unsigned Shuffle, PermuteImm;
37979 
37980   // Which shuffle domains are permitted?
37981   // Permit domain crossing at higher combine depths.
37982   // TODO: Should we indicate which domain is preferred if both are allowed?
37983   bool AllowFloatDomain = FloatDomain || (Depth >= 3);
37984   bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
37985                         (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
37986 
37987   // Determine zeroable mask elements.
37988   APInt KnownUndef, KnownZero;
37989   resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
37990   APInt Zeroable = KnownUndef | KnownZero;
37991 
37992   if (UnaryShuffle) {
37993     // Attempt to match against broadcast-from-vector.
37994     // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
37995     if ((Subtarget.hasAVX2() ||
37996          (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
37997         (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
37998       if (isUndefOrEqual(Mask, 0)) {
37999         if (V1.getValueType() == MaskVT &&
38000             V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38001             X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
38002           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38003             return SDValue(); // Nothing to do!
38004           Res = V1.getOperand(0);
38005           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38006           return DAG.getBitcast(RootVT, Res);
38007         }
38008         if (Subtarget.hasAVX2()) {
38009           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38010             return SDValue(); // Nothing to do!
38011           Res = CanonicalizeShuffleInput(MaskVT, V1);
38012           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38013           return DAG.getBitcast(RootVT, Res);
38014         }
38015       }
38016     }
38017 
38018     if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
38019                           DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
38020         (!IsMaskedShuffle ||
38021          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38022       if (Depth == 0 && Root.getOpcode() == Shuffle)
38023         return SDValue(); // Nothing to do!
38024       Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38025       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
38026       return DAG.getBitcast(RootVT, Res);
38027     }
38028 
38029     if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38030                                  AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
38031                                  PermuteImm) &&
38032         (!IsMaskedShuffle ||
38033          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38034       if (Depth == 0 && Root.getOpcode() == Shuffle)
38035         return SDValue(); // Nothing to do!
38036       Res = CanonicalizeShuffleInput(ShuffleVT, V1);
38037       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
38038                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38039       return DAG.getBitcast(RootVT, Res);
38040     }
38041   }
38042 
38043   // Attempt to combine to INSERTPS, but only if the inserted element has come
38044   // from a scalar.
38045   // TODO: Handle other insertions here as well?
38046   if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
38047       Subtarget.hasSSE41() &&
38048       !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
38049     if (MaskEltSizeInBits == 32) {
38050       SDValue SrcV1 = V1, SrcV2 = V2;
38051       if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
38052                                  DAG) &&
38053           SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
38054         if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38055           return SDValue(); // Nothing to do!
38056         Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38057                           CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
38058                           CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
38059                           DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38060         return DAG.getBitcast(RootVT, Res);
38061       }
38062     }
38063     if (MaskEltSizeInBits == 64 &&
38064         isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
38065         V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38066         V2.getScalarValueSizeInBits() <= 32) {
38067       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38068         return SDValue(); // Nothing to do!
38069       PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
38070       Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38071                         CanonicalizeShuffleInput(MVT::v4f32, V1),
38072                         CanonicalizeShuffleInput(MVT::v4f32, V2),
38073                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38074       return DAG.getBitcast(RootVT, Res);
38075     }
38076   }
38077 
38078   SDValue NewV1 = V1; // Save operands in case early exit happens.
38079   SDValue NewV2 = V2;
38080   if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
38081                          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
38082                          ShuffleVT, UnaryShuffle) &&
38083       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38084     if (Depth == 0 && Root.getOpcode() == Shuffle)
38085       return SDValue(); // Nothing to do!
38086     NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
38087     NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
38088     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
38089     return DAG.getBitcast(RootVT, Res);
38090   }
38091 
38092   NewV1 = V1; // Save operands in case early exit happens.
38093   NewV2 = V2;
38094   if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38095                                 AllowIntDomain, NewV1, NewV2, DL, DAG,
38096                                 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
38097       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38098     if (Depth == 0 && Root.getOpcode() == Shuffle)
38099       return SDValue(); // Nothing to do!
38100     NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
38101     NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
38102     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
38103                       DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38104     return DAG.getBitcast(RootVT, Res);
38105   }
38106 
38107   // Typically from here on, we need an integer version of MaskVT.
38108   MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
38109   IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
38110 
38111   // Annoyingly, SSE4A instructions don't map into the above match helpers.
38112   if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
38113     uint64_t BitLen, BitIdx;
38114     if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
38115                             Zeroable)) {
38116       if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
38117         return SDValue(); // Nothing to do!
38118       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38119       Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
38120                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
38121                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38122       return DAG.getBitcast(RootVT, Res);
38123     }
38124 
38125     if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
38126       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
38127         return SDValue(); // Nothing to do!
38128       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38129       V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
38130       Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
38131                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
38132                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38133       return DAG.getBitcast(RootVT, Res);
38134     }
38135   }
38136 
38137   // Match shuffle against TRUNCATE patterns.
38138   if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
38139     // Match against a VTRUNC instruction, accounting for src/dst sizes.
38140     if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
38141                              Subtarget)) {
38142       bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
38143                         ShuffleSrcVT.getVectorNumElements();
38144       unsigned Opc =
38145           IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
38146       if (Depth == 0 && Root.getOpcode() == Opc)
38147         return SDValue(); // Nothing to do!
38148       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38149       Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
38150       if (ShuffleVT.getSizeInBits() < RootSizeInBits)
38151         Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
38152       return DAG.getBitcast(RootVT, Res);
38153     }
38154 
38155     // Do we need a more general binary truncation pattern?
38156     if (RootSizeInBits < 512 &&
38157         ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
38158          (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
38159         (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
38160         isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
38161       // Bail if this was already a truncation or PACK node.
38162       // We sometimes fail to match PACK if we demand known undef elements.
38163       if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
38164                          Root.getOpcode() == X86ISD::PACKSS ||
38165                          Root.getOpcode() == X86ISD::PACKUS))
38166         return SDValue(); // Nothing to do!
38167       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38168       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
38169       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38170       V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
38171       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38172       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
38173       Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
38174       Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
38175       return DAG.getBitcast(RootVT, Res);
38176     }
38177   }
38178 
38179   // Don't try to re-form single instruction chains under any circumstances now
38180   // that we've done encoding canonicalization for them.
38181   if (Depth < 1)
38182     return SDValue();
38183 
38184   // Depth threshold above which we can efficiently use variable mask shuffles.
38185   int VariableCrossLaneShuffleDepth =
38186       Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
38187   int VariablePerLaneShuffleDepth =
38188       Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
38189   AllowVariableCrossLaneMask &=
38190       (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
38191   AllowVariablePerLaneMask &=
38192       (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
38193   // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
38194   // higher depth before combining them.
38195   bool AllowBWIVPERMV3 =
38196       (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
38197 
38198   bool MaskContainsZeros = isAnyZero(Mask);
38199 
38200   if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
38201     // If we have a single input lane-crossing shuffle then lower to VPERMV.
38202     if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
38203       if (Subtarget.hasAVX2() &&
38204           (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
38205         SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
38206         Res = CanonicalizeShuffleInput(MaskVT, V1);
38207         Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
38208         return DAG.getBitcast(RootVT, Res);
38209       }
38210       // AVX512 variants (non-VLX will pad to 512-bit shuffles).
38211       if ((Subtarget.hasAVX512() &&
38212            (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38213             MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38214           (Subtarget.hasBWI() &&
38215            (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38216           (Subtarget.hasVBMI() &&
38217            (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
38218         V1 = CanonicalizeShuffleInput(MaskVT, V1);
38219         V2 = DAG.getUNDEF(MaskVT);
38220         Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38221         return DAG.getBitcast(RootVT, Res);
38222       }
38223     }
38224 
38225     // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
38226     // vector as the second source (non-VLX will pad to 512-bit shuffles).
38227     if (UnaryShuffle && AllowVariableCrossLaneMask &&
38228         ((Subtarget.hasAVX512() &&
38229           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38230            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38231            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
38232            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38233          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38234           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38235          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38236           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38237       // Adjust shuffle mask - replace SM_SentinelZero with second source index.
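            // E.g. with an 8-element mask, a zero at position i becomes index 8 + i,
            // which reads lane i of the all-zeros V2 built below.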
38238       for (unsigned i = 0; i != NumMaskElts; ++i)
38239         if (Mask[i] == SM_SentinelZero)
38240           Mask[i] = NumMaskElts + i;
38241       V1 = CanonicalizeShuffleInput(MaskVT, V1);
38242       V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
38243       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38244       return DAG.getBitcast(RootVT, Res);
38245     }
38246 
38247     // If that failed and either input is extracted then try to combine as a
38248     // shuffle with the larger type.
38249     if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38250             Inputs, Root, BaseMask, Depth, HasVariableMask,
38251             AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
38252             Subtarget))
38253       return WideShuffle;
38254 
38255     // If we have a dual input lane-crossing shuffle then lower to VPERMV3,
38256     // (non-VLX will pad to 512-bit shuffles).
38257     if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
38258         ((Subtarget.hasAVX512() &&
38259           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38260            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38261            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
38262            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
38263          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38264           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38265          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38266           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38267       V1 = CanonicalizeShuffleInput(MaskVT, V1);
38268       V2 = CanonicalizeShuffleInput(MaskVT, V2);
38269       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38270       return DAG.getBitcast(RootVT, Res);
38271     }
38272     return SDValue();
38273   }
38274 
38275   // See if we can combine a single input shuffle with zeros to a bit-mask,
38276   // which is much simpler than any shuffle.
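        // E.g. a mask of {0, SM_SentinelZero, 2, SM_SentinelZero} becomes an AND
        // with the constant {all-ones, 0, all-ones, 0}, keeping lanes 0 and 2 and
        // clearing the zeroed lanes.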
38277   if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
38278       isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
38279       DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
38280     APInt Zero = APInt::getZero(MaskEltSizeInBits);
38281     APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
38282     APInt UndefElts(NumMaskElts, 0);
38283     SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
38284     for (unsigned i = 0; i != NumMaskElts; ++i) {
38285       int M = Mask[i];
38286       if (M == SM_SentinelUndef) {
38287         UndefElts.setBit(i);
38288         continue;
38289       }
38290       if (M == SM_SentinelZero)
38291         continue;
38292       EltBits[i] = AllOnes;
38293     }
38294     SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
38295     Res = CanonicalizeShuffleInput(MaskVT, V1);
38296     unsigned AndOpcode =
38297         MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
38298     Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
38299     return DAG.getBitcast(RootVT, Res);
38300   }
38301 
38302   // If we have a single input shuffle with different shuffle patterns in the
38303   // 128-bit lanes, use a variable mask to lower to VPERMILPS.
38304   // TODO: Combine other mask types at higher depths.
38305   if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38306       ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
38307        (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
38308     SmallVector<SDValue, 16> VPermIdx;
38309     for (int M : Mask) {
38310       SDValue Idx =
38311           M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
38312       VPermIdx.push_back(Idx);
38313     }
38314     SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
38315     Res = CanonicalizeShuffleInput(MaskVT, V1);
38316     Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
38317     return DAG.getBitcast(RootVT, Res);
38318   }
38319 
38320   // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
38321   // to VPERMIL2PD/VPERMIL2PS.
38322   if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
38323       (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
38324        MaskVT == MVT::v8f32)) {
38325     // VPERMIL2 Operation.
38326     // Bits[3] - Match Bit.
38327     // Bits[2:1] - (Per Lane) PD Shuffle Mask.
38328     // Bits[2:0] - (Per Lane) PS Shuffle Mask.
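          // Illustrative mapping for the index vector built below (v4f32 case, so
          // NumEltsPerLane == NumMaskElts == 4): mask element 5 (element 1 of V2)
          // becomes index 1 + 1*4 == 5, while SM_SentinelZero pushes index 8 (match
          // bit set) and sets the M2Z immediate to 2 so those elements are zeroed.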
38329     unsigned NumLanes = MaskVT.getSizeInBits() / 128;
38330     unsigned NumEltsPerLane = NumMaskElts / NumLanes;
38331     SmallVector<int, 8> VPerm2Idx;
38332     unsigned M2ZImm = 0;
38333     for (int M : Mask) {
38334       if (M == SM_SentinelUndef) {
38335         VPerm2Idx.push_back(-1);
38336         continue;
38337       }
38338       if (M == SM_SentinelZero) {
38339         M2ZImm = 2;
38340         VPerm2Idx.push_back(8);
38341         continue;
38342       }
38343       int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
38344       Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
38345       VPerm2Idx.push_back(Index);
38346     }
38347     V1 = CanonicalizeShuffleInput(MaskVT, V1);
38348     V2 = CanonicalizeShuffleInput(MaskVT, V2);
38349     SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
38350     Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
38351                       DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
38352     return DAG.getBitcast(RootVT, Res);
38353   }
38354 
38355   // If we have 3 or more shuffle instructions or a chain involving a variable
38356   // mask, we can replace them with a single PSHUFB instruction profitably.
38357   // Intel's manuals suggest only using PSHUFB if doing so replaces 5
38358   // instructions, but in practice PSHUFB tends to be *very* fast so we're
38359   // more aggressive.
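        // As a rough illustration of the byte-mask expansion below: for a v4i32 root
        // (Ratio == 4), a mask element of 2 expands to the byte indices {8,9,10,11},
        // and SM_SentinelZero lanes become 0x80, which PSHUFB treats as "zero this
        // byte".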
38360   if (UnaryShuffle && AllowVariablePerLaneMask &&
38361       ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38362        (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
38363        (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
38364     SmallVector<SDValue, 16> PSHUFBMask;
38365     int NumBytes = RootVT.getSizeInBits() / 8;
38366     int Ratio = NumBytes / NumMaskElts;
38367     for (int i = 0; i < NumBytes; ++i) {
38368       int M = Mask[i / Ratio];
38369       if (M == SM_SentinelUndef) {
38370         PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
38371         continue;
38372       }
38373       if (M == SM_SentinelZero) {
38374         PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38375         continue;
38376       }
38377       M = Ratio * M + i % Ratio;
38378       assert((M / 16) == (i / 16) && "Lane crossing detected");
38379       PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38380     }
38381     MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
38382     Res = CanonicalizeShuffleInput(ByteVT, V1);
38383     SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
38384     Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
38385     return DAG.getBitcast(RootVT, Res);
38386   }
38387 
38388   // With XOP, if we have a 128-bit binary input shuffle we can always combine
38389   // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
38390   // slower than PSHUFB on targets that support both.
38391   if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
38392       Subtarget.hasXOP()) {
38393     // VPPERM Mask Operation
38394     // Bits[4:0] - Byte Index (0 - 31)
38395     // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
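          // E.g. for a v2i64 mask of {1, 2}, the byte mask built below becomes
          // indices 8..15 (upper half of V1) followed by 16..23 (lower half of V2);
          // SM_SentinelZero lanes use 0x80 to select the ZERO operation.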
38396     SmallVector<SDValue, 16> VPPERMMask;
38397     int NumBytes = 16;
38398     int Ratio = NumBytes / NumMaskElts;
38399     for (int i = 0; i < NumBytes; ++i) {
38400       int M = Mask[i / Ratio];
38401       if (M == SM_SentinelUndef) {
38402         VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
38403         continue;
38404       }
38405       if (M == SM_SentinelZero) {
38406         VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38407         continue;
38408       }
38409       M = Ratio * M + i % Ratio;
38410       VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38411     }
38412     MVT ByteVT = MVT::v16i8;
38413     V1 = CanonicalizeShuffleInput(ByteVT, V1);
38414     V2 = CanonicalizeShuffleInput(ByteVT, V2);
38415     SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
38416     Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
38417     return DAG.getBitcast(RootVT, Res);
38418   }
38419 
38420   // If that failed and either input is extracted then try to combine as a
38421   // shuffle with the larger type.
38422   if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38423           Inputs, Root, BaseMask, Depth, HasVariableMask,
38424           AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
38425     return WideShuffle;
38426 
38427   // If we have a dual input shuffle then lower to VPERMV3,
38428   // (non-VLX will pad to 512-bit shuffles)
38429   if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38430       ((Subtarget.hasAVX512() &&
38431         (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
38432          MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
38433          MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
38434          MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
38435          MaskVT == MVT::v16i32)) ||
38436        (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38437         (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
38438          MaskVT == MVT::v32i16)) ||
38439        (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38440         (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
38441          MaskVT == MVT::v64i8)))) {
38442     V1 = CanonicalizeShuffleInput(MaskVT, V1);
38443     V2 = CanonicalizeShuffleInput(MaskVT, V2);
38444     Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38445     return DAG.getBitcast(RootVT, Res);
38446   }
38447 
38448   // Failed to find any combines.
38449   return SDValue();
38450 }
38451 
38452 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
38453 // instruction if possible.
38454 //
38455 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
38456 // type size to attempt to combine:
38457 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
38458 // -->
38459 // extract_subvector(shuffle(x,y,m2),0)
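      // For example, a v4f32 shuffle whose operands are 128-bit subvectors extracted
      // from wider v8f32 values (at least one from an upper half) can be rewritten as
      // a v8f32 shuffle of those values directly, with the mask widened and padded
      // with undefs, followed by extracting the low 128 bits of the result.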
38460 static SDValue combineX86ShuffleChainWithExtract(
38461     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38462     bool HasVariableMask, bool AllowVariableCrossLaneMask,
38463     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38464     const X86Subtarget &Subtarget) {
38465   unsigned NumMaskElts = BaseMask.size();
38466   unsigned NumInputs = Inputs.size();
38467   if (NumInputs == 0)
38468     return SDValue();
38469 
38470   EVT RootVT = Root.getValueType();
38471   unsigned RootSizeInBits = RootVT.getSizeInBits();
38472   unsigned RootEltSizeInBits = RootSizeInBits / NumMaskElts;
38473   assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
38474 
38475   // Peek through extract_subvector to find widest legal vector.
38476   // TODO: Handle ISD::TRUNCATE
38477   unsigned WideSizeInBits = RootSizeInBits;
38478   for (unsigned I = 0; I != NumInputs; ++I) {
38479     SDValue Input = peekThroughBitcasts(Inputs[I]);
38480     while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR)
38481       Input = peekThroughBitcasts(Input.getOperand(0));
38482     if (DAG.getTargetLoweringInfo().isTypeLegal(Input.getValueType()) &&
38483         WideSizeInBits < Input.getValueSizeInBits())
38484       WideSizeInBits = Input.getValueSizeInBits();
38485   }
38486 
38487   // Bail if we fail to find a source larger than the existing root.
38488   unsigned Scale = WideSizeInBits / RootSizeInBits;
38489   if (WideSizeInBits <= RootSizeInBits ||
38490       (WideSizeInBits % RootSizeInBits) != 0)
38491     return SDValue();
38492 
38493   // Create new mask for larger type.
38494   SmallVector<int, 64> WideMask(BaseMask);
38495   for (int &M : WideMask) {
38496     if (M < 0)
38497       continue;
38498     M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
38499   }
38500   WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
38501 
38502   // Attempt to peek through inputs and adjust mask when we extract from an
38503   // upper subvector.
38504   int AdjustedMasks = 0;
38505   SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
38506   for (unsigned I = 0; I != NumInputs; ++I) {
38507     SDValue &Input = WideInputs[I];
38508     Input = peekThroughBitcasts(Input);
38509     while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38510            Input.getOperand(0).getValueSizeInBits() <= WideSizeInBits) {
38511       uint64_t Idx = Input.getConstantOperandVal(1);
38512       if (Idx != 0) {
38513         ++AdjustedMasks;
38514         unsigned InputEltSizeInBits = Input.getScalarValueSizeInBits();
38515         Idx = (Idx * InputEltSizeInBits) / RootEltSizeInBits;
38516 
38517         int lo = I * WideMask.size();
38518         int hi = (I + 1) * WideMask.size();
38519         for (int &M : WideMask)
38520           if (lo <= M && M < hi)
38521             M += Idx;
38522       }
38523       Input = peekThroughBitcasts(Input.getOperand(0));
38524     }
38525   }
38526 
38527   // Remove unused/repeated shuffle source ops.
38528   resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
38529   assert(!WideInputs.empty() && "Shuffle with no inputs detected");
38530 
38531   // Bail if we're always extracting from the lowest subvectors (in which case
38532   // combineX86ShuffleChain should match this for the current width), or if the
38533   // shuffle still references too many inputs.
38534   if (AdjustedMasks == 0 || WideInputs.size() > 2)
38535     return SDValue();
38536 
38537   // Minor canonicalization of the accumulated shuffle mask to make it easier
38538   // to match below. All this does is detect masks with sequential pairs of
38539   // elements, and shrink them to the half-width mask. It does this in a loop
38540   // so it will reduce the size of the mask to the minimal width mask which
38541   // performs an equivalent shuffle.
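        // E.g. a mask of {0, 1, 4, 5} is made up of sequential pairs, so it shrinks
        // to the half-width mask {0, 2}; {0, 2} has no sequential pairs, so the loop
        // stops there.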
38542   while (WideMask.size() > 1) {
38543     SmallVector<int, 64> WidenedMask;
38544     if (!canWidenShuffleElements(WideMask, WidenedMask))
38545       break;
38546     WideMask = std::move(WidenedMask);
38547   }
38548 
38549   // Canonicalization of binary shuffle masks to improve pattern matching by
38550   // commuting the inputs.
38551   if (WideInputs.size() == 2 && canonicalizeShuffleMaskWithCommute(WideMask)) {
38552     ShuffleVectorSDNode::commuteMask(WideMask);
38553     std::swap(WideInputs[0], WideInputs[1]);
38554   }
38555 
38556   // Increase depth for every upper subvector we've peeked through.
38557   Depth += AdjustedMasks;
38558 
38559   // Attempt to combine wider chain.
38560   // TODO: Can we use a better Root?
38561   SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
38562                              WideInputs.back().getValueSizeInBits()
38563                          ? WideInputs.front()
38564                          : WideInputs.back();
38565   assert(WideRoot.getValueSizeInBits() == WideSizeInBits &&
38566          "WideRootSize mismatch");
38567 
38568   if (SDValue WideShuffle =
38569           combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
38570                                  HasVariableMask, AllowVariableCrossLaneMask,
38571                                  AllowVariablePerLaneMask, DAG, Subtarget)) {
38572     WideShuffle =
38573         extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
38574     return DAG.getBitcast(RootVT, WideShuffle);
38575   }
38576 
38577   return SDValue();
38578 }
38579 
38580 // Canonicalize the combined shuffle mask chain with horizontal ops.
38581 // NOTE: This may update the Ops and Mask.
38582 static SDValue canonicalizeShuffleMaskWithHorizOp(
38583     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
38584     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
38585     const X86Subtarget &Subtarget) {
38586   if (Mask.empty() || Ops.empty())
38587     return SDValue();
38588 
38589   SmallVector<SDValue> BC;
38590   for (SDValue Op : Ops)
38591     BC.push_back(peekThroughBitcasts(Op));
38592 
38593   // All ops must be the same horizop + type.
38594   SDValue BC0 = BC[0];
38595   EVT VT0 = BC0.getValueType();
38596   unsigned Opcode0 = BC0.getOpcode();
38597   if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
38598         return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
38599       }))
38600     return SDValue();
38601 
38602   bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
38603                   Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
38604   bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
38605   if (!isHoriz && !isPack)
38606     return SDValue();
38607 
38608   // Do all ops have a single use?
38609   bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
38610     return Op.hasOneUse() &&
38611            peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
38612   });
38613 
38614   int NumElts = VT0.getVectorNumElements();
38615   int NumLanes = VT0.getSizeInBits() / 128;
38616   int NumEltsPerLane = NumElts / NumLanes;
38617   int NumHalfEltsPerLane = NumEltsPerLane / 2;
38618   MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
38619   unsigned EltSizeInBits = RootSizeInBits / Mask.size();
38620 
38621   if (NumEltsPerLane >= 4 &&
38622       (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
38623     SmallVector<int> LaneMask, ScaledMask;
38624     if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
38625         scaleShuffleElements(LaneMask, 4, ScaledMask)) {
38626       // See if we can remove the shuffle by re-sorting the HOP chain so that
38627       // the HOP args are pre-shuffled.
38628       // TODO: Generalize to any sized/depth chain.
38629       // TODO: Add support for PACKSS/PACKUS.
38630       if (isHoriz) {
38631         // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
38632         auto GetHOpSrc = [&](int M) {
38633           if (M == SM_SentinelUndef)
38634             return DAG.getUNDEF(VT0);
38635           if (M == SM_SentinelZero)
38636             return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
38637           SDValue Src0 = BC[M / 4];
38638           SDValue Src1 = Src0.getOperand((M % 4) >= 2);
38639           if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
38640             return Src1.getOperand(M % 2);
38641           return SDValue();
38642         };
38643         SDValue M0 = GetHOpSrc(ScaledMask[0]);
38644         SDValue M1 = GetHOpSrc(ScaledMask[1]);
38645         SDValue M2 = GetHOpSrc(ScaledMask[2]);
38646         SDValue M3 = GetHOpSrc(ScaledMask[3]);
38647         if (M0 && M1 && M2 && M3) {
38648           SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
38649           SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
38650           return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38651         }
38652       }
38653       // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
38654       if (Ops.size() >= 2) {
38655         SDValue LHS, RHS;
38656         auto GetHOpSrc = [&](int M, int &OutM) {
38657           // TODO: Support SM_SentinelZero
38658           if (M < 0)
38659             return M == SM_SentinelUndef;
38660           SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
38661           if (!LHS || LHS == Src) {
38662             LHS = Src;
38663             OutM = (M % 2);
38664             return true;
38665           }
38666           if (!RHS || RHS == Src) {
38667             RHS = Src;
38668             OutM = (M % 2) + 2;
38669             return true;
38670           }
38671           return false;
38672         };
38673         int PostMask[4] = {-1, -1, -1, -1};
38674         if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
38675             GetHOpSrc(ScaledMask[1], PostMask[1]) &&
38676             GetHOpSrc(ScaledMask[2], PostMask[2]) &&
38677             GetHOpSrc(ScaledMask[3], PostMask[3])) {
38678           LHS = DAG.getBitcast(SrcVT, LHS);
38679           RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
38680           SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38681           // Use SHUFPS for the permute so this will work on SSE2 targets,
38682           // shuffle combining and domain handling will simplify this later on.
38683           MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
38684           Res = DAG.getBitcast(ShuffleVT, Res);
38685           return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
38686                              getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
38687         }
38688       }
38689     }
38690   }
38691 
38692   if (2 < Ops.size())
38693     return SDValue();
38694 
38695   SDValue BC1 = BC[BC.size() - 1];
38696   if (Mask.size() == VT0.getVectorNumElements()) {
38697     // Canonicalize binary shuffles of horizontal ops that use the
38698     // same sources to a unary shuffle.
38699     // TODO: Try to perform this fold even if the shuffle remains.
38700     if (Ops.size() == 2) {
38701       auto ContainsOps = [](SDValue HOp, SDValue Op) {
38702         return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
38703       };
38704       // Commute if all BC0's ops are contained in BC1.
38705       if (ContainsOps(BC1, BC0.getOperand(0)) &&
38706           ContainsOps(BC1, BC0.getOperand(1))) {
38707         ShuffleVectorSDNode::commuteMask(Mask);
38708         std::swap(Ops[0], Ops[1]);
38709         std::swap(BC0, BC1);
38710       }
38711 
38712       // If BC1 can be represented by BC0, then convert to unary shuffle.
38713       if (ContainsOps(BC0, BC1.getOperand(0)) &&
38714           ContainsOps(BC0, BC1.getOperand(1))) {
38715         for (int &M : Mask) {
38716           if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
38717             continue;
38718           int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
38719           M -= NumElts + (SubLane * NumHalfEltsPerLane);
38720           if (BC1.getOperand(SubLane) != BC0.getOperand(0))
38721             M += NumHalfEltsPerLane;
38722         }
38723       }
38724     }
38725 
38726     // Canonicalize unary horizontal ops to only refer to lower halves.
38727     for (int i = 0; i != NumElts; ++i) {
38728       int &M = Mask[i];
38729       if (isUndefOrZero(M))
38730         continue;
38731       if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
38732           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38733         M -= NumHalfEltsPerLane;
38734       if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
38735           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38736         M -= NumHalfEltsPerLane;
38737     }
38738   }
38739 
38740   // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
38741   // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
38742   // represents the LHS/RHS inputs for the lower/upper halves.
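        // E.g. shuffle(HADD(a,b), HADD(c,d)) where each 128-bit lane takes the lower
        // half of the first hop and the upper half of the second can be rebuilt as
        // the single node HADD(a, d).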
38743   SmallVector<int, 16> TargetMask128, WideMask128;
38744   if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
38745       scaleShuffleElements(TargetMask128, 2, WideMask128)) {
38746     assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
38747     bool SingleOp = (Ops.size() == 1);
38748     if (isPack || OneUseOps ||
38749         shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
38750       SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
38751       SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
38752       Lo = Lo.getOperand(WideMask128[0] & 1);
38753       Hi = Hi.getOperand(WideMask128[1] & 1);
38754       if (SingleOp) {
38755         SDValue Undef = DAG.getUNDEF(SrcVT);
38756         SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
38757         Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
38758         Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
38759         Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
38760         Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
38761       }
38762       return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
38763     }
38764   }
38765 
38766   // If we are post-shuffling a 256-bit hop and not requiring the upper
38767   // elements, then try to narrow to a 128-bit hop directly.
38768   SmallVector<int, 16> WideMask64;
38769   if (Ops.size() == 1 && NumLanes == 2 &&
38770       scaleShuffleElements(Mask, 4, WideMask64) &&
38771       isUndefInRange(WideMask64, 2, 2)) {
38772     int M0 = WideMask64[0];
38773     int M1 = WideMask64[1];
38774     if (isInRange(M0, 0, 4) && isInRange(M1, 0, 4)) {
38775       MVT HalfVT = VT0.getSimpleVT().getHalfNumVectorElementsVT();
38776       unsigned Idx0 = (M0 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38777       unsigned Idx1 = (M1 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38778       SDValue V0 = extract128BitVector(BC[0].getOperand(M0 & 1), Idx0, DAG, DL);
38779       SDValue V1 = extract128BitVector(BC[0].getOperand(M1 & 1), Idx1, DAG, DL);
38780       SDValue Res = DAG.getNode(Opcode0, DL, HalfVT, V0, V1);
38781       return widenSubVector(Res, false, Subtarget, DAG, DL, 256);
38782     }
38783   }
38784 
38785   return SDValue();
38786 }
38787 
38788 // Attempt to constant fold all of the constant source ops.
38789 // Returns the folded constant if the entire shuffle folds to a constant.
38790 // TODO: Extend this to merge multiple constant Ops and update the mask.
38791 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
38792                                            ArrayRef<int> Mask, SDValue Root,
38793                                            bool HasVariableMask,
38794                                            SelectionDAG &DAG,
38795                                            const X86Subtarget &Subtarget) {
38796   MVT VT = Root.getSimpleValueType();
38797 
38798   unsigned SizeInBits = VT.getSizeInBits();
38799   unsigned NumMaskElts = Mask.size();
38800   unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
38801   unsigned NumOps = Ops.size();
38802 
38803   // Extract constant bits from each source op.
38804   SmallVector<APInt, 16> UndefEltsOps(NumOps);
38805   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
38806   for (unsigned I = 0; I != NumOps; ++I)
38807     if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
38808                                        RawBitsOps[I]))
38809       return SDValue();
38810 
38811   // If we're optimizing for size, only fold if at least one of the constants is
38812   // only used once or the combined shuffle has included a variable mask
38813   // shuffle; this avoids constant pool bloat.
38814   bool IsOptimizingSize = DAG.shouldOptForSize();
38815   if (IsOptimizingSize && !HasVariableMask &&
38816       llvm::none_of(Ops, [](SDValue SrcOp) { return SrcOp->hasOneUse(); }))
38817     return SDValue();
38818 
38819   // Shuffle the constant bits according to the mask.
38820   SDLoc DL(Root);
38821   APInt UndefElts(NumMaskElts, 0);
38822   APInt ZeroElts(NumMaskElts, 0);
38823   APInt ConstantElts(NumMaskElts, 0);
38824   SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
38825                                         APInt::getZero(MaskSizeInBits));
38826   for (unsigned i = 0; i != NumMaskElts; ++i) {
38827     int M = Mask[i];
38828     if (M == SM_SentinelUndef) {
38829       UndefElts.setBit(i);
38830       continue;
38831     } else if (M == SM_SentinelZero) {
38832       ZeroElts.setBit(i);
38833       continue;
38834     }
38835     assert(0 <= M && M < (int)(NumMaskElts * NumOps));
38836 
38837     unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
38838     unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
38839 
38840     auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
38841     if (SrcUndefElts[SrcMaskIdx]) {
38842       UndefElts.setBit(i);
38843       continue;
38844     }
38845 
38846     auto &SrcEltBits = RawBitsOps[SrcOpIdx];
38847     APInt &Bits = SrcEltBits[SrcMaskIdx];
38848     if (!Bits) {
38849       ZeroElts.setBit(i);
38850       continue;
38851     }
38852 
38853     ConstantElts.setBit(i);
38854     ConstantBitData[i] = Bits;
38855   }
38856   assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
38857 
38858   // Attempt to create a zero vector.
38859   if ((UndefElts | ZeroElts).isAllOnes())
38860     return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
38861 
38862   // Create the constant data.
38863   MVT MaskSVT;
38864   if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
38865     MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
38866   else
38867     MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
38868 
38869   MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
38870   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38871     return SDValue();
38872 
38873   SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
38874   return DAG.getBitcast(VT, CstOp);
38875 }
38876 
38877 namespace llvm {
38878   namespace X86 {
38879     enum {
38880       MaxShuffleCombineDepth = 8
38881     };
38882   } // namespace X86
38883 } // namespace llvm
38884 
38885 /// Fully generic combining of x86 shuffle instructions.
38886 ///
38887 /// This should be the last combine run over the x86 shuffle instructions. Once
38888 /// they have been fully optimized, this will recursively consider all chains
38889 /// of single-use shuffle instructions, build a generic model of the cumulative
38890 /// shuffle operation, and check for simpler instructions which implement this
38891 /// operation. We use this primarily for two purposes:
38892 ///
38893 /// 1) Collapse generic shuffles to specialized single instructions when
38894 ///    equivalent. In most cases, this is just an encoding size win, but
38895 ///    sometimes we will collapse multiple generic shuffles into a single
38896 ///    special-purpose shuffle.
38897 /// 2) Look for sequences of shuffle instructions with 3 or more total
38898 ///    instructions, and replace them with the slightly more expensive SSSE3
38899 ///    PSHUFB instruction if available. We do this as the last combining step
38900 ///    to ensure we avoid using PSHUFB if we can implement the shuffle with
38901 ///    a suitable short sequence of other instructions. The PSHUFB will either
38902 ///    use a register or have to read from memory and so is slightly (but only
38903 ///    slightly) more expensive than the other shuffle instructions.
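      ///
      /// For example, a chain of two single-use shuffles is modelled as one
      /// cumulative mask, which is then re-matched either against a cheaper
      /// dedicated shuffle instruction (case 1) or against PSHUFB (case 2).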
38904 ///
38905 /// Because this is inherently a quadratic operation (for each shuffle in
38906 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
38907 /// This should never be an issue in practice as the shuffle lowering doesn't
38908 /// produce sequences of more than 8 instructions.
38909 ///
38910 /// FIXME: We will currently miss some cases where the redundant shuffling
38911 /// would simplify under the threshold for PSHUFB formation because of
38912 /// combine-ordering. To fix this, we should do the redundant instruction
38913 /// combining in this recursive walk.
38914 static SDValue combineX86ShufflesRecursively(
38915     ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
38916     ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
38917     unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
38918     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38919     const X86Subtarget &Subtarget) {
38920   assert(!RootMask.empty() &&
38921          (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
38922          "Illegal shuffle root mask");
38923   MVT RootVT = Root.getSimpleValueType();
38924   assert(RootVT.isVector() && "Shuffles operate on vector types!");
38925   unsigned RootSizeInBits = RootVT.getSizeInBits();
38926 
38927   // Bound the depth of our recursive combine because this is ultimately
38928   // quadratic in nature.
38929   if (Depth >= MaxDepth)
38930     return SDValue();
38931 
38932   // Directly rip through bitcasts to find the underlying operand.
38933   SDValue Op = SrcOps[SrcOpIndex];
38934   Op = peekThroughOneUseBitcasts(Op);
38935 
38936   EVT VT = Op.getValueType();
38937   if (!VT.isVector() || !VT.isSimple())
38938     return SDValue(); // Bail if we hit a non-simple non-vector.
38939 
38940   // FIXME: Just bail on f16 for now.
38941   if (VT.getVectorElementType() == MVT::f16)
38942     return SDValue();
38943 
38944   assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
38945          "Can only combine shuffles up to size of the root op.");
38946 
38947   // Create a demanded elts mask from the referenced elements of Op.
38948   APInt OpDemandedElts = APInt::getZero(RootMask.size());
38949   for (int M : RootMask) {
38950     int BaseIdx = RootMask.size() * SrcOpIndex;
38951     if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
38952       OpDemandedElts.setBit(M - BaseIdx);
38953   }
38954   if (RootSizeInBits != VT.getSizeInBits()) {
38955     // Op is smaller than Root - extract the demanded elts for the subvector.
38956     unsigned Scale = RootSizeInBits / VT.getSizeInBits();
38957     unsigned NumOpMaskElts = RootMask.size() / Scale;
38958     assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
38959     assert(OpDemandedElts
38960                .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
38961                .isZero() &&
38962            "Out of range elements referenced in root mask");
38963     OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
38964   }
38965   OpDemandedElts =
38966       APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
38967 
38968   // Extract target shuffle mask and resolve sentinels and inputs.
38969   SmallVector<int, 64> OpMask;
38970   SmallVector<SDValue, 2> OpInputs;
38971   APInt OpUndef, OpZero;
38972   bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
38973   if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
38974                              OpZero, DAG, Depth, false)) {
38975     // Shuffle inputs must not be larger than the shuffle result.
38976     // TODO: Relax this for single input faux shuffles (e.g. trunc).
38977     if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
38978           return OpInput.getValueSizeInBits() > VT.getSizeInBits();
38979         }))
38980       return SDValue();
38981   } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38982              (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
38983              !isNullConstant(Op.getOperand(1))) {
38984     SDValue SrcVec = Op.getOperand(0);
38985     int ExtractIdx = Op.getConstantOperandVal(1);
38986     unsigned NumElts = VT.getVectorNumElements();
38987     OpInputs.assign({SrcVec});
38988     OpMask.assign(NumElts, SM_SentinelUndef);
38989     std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
38990     OpZero = OpUndef = APInt::getZero(NumElts);
38991   } else {
38992     return SDValue();
38993   }
38994 
38995   // If the shuffle result was smaller than the root, we need to adjust the
38996   // mask indices and pad the mask with undefs.
38997   if (RootSizeInBits > VT.getSizeInBits()) {
38998     unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
38999     unsigned OpMaskSize = OpMask.size();
39000     if (OpInputs.size() > 1) {
39001       unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
39002       for (int &M : OpMask) {
39003         if (M < 0)
39004           continue;
39005         int EltIdx = M % OpMaskSize;
39006         int OpIdx = M / OpMaskSize;
39007         M = (PaddedMaskSize * OpIdx) + EltIdx;
39008       }
39009     }
39010     OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
39011     OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
39012     OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
39013   }
39014 
39015   SmallVector<int, 64> Mask;
39016   SmallVector<SDValue, 16> Ops;
39017 
39018   // We don't need to merge masks if the root is empty.
39019   bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
39020   if (EmptyRoot) {
39021     // Only resolve zeros if it will remove an input, otherwise we might end
39022     // up in an infinite loop.
39023     bool ResolveKnownZeros = true;
39024     if (!OpZero.isZero()) {
39025       APInt UsedInputs = APInt::getZero(OpInputs.size());
39026       for (int i = 0, e = OpMask.size(); i != e; ++i) {
39027         int M = OpMask[i];
39028         if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
39029           continue;
39030         UsedInputs.setBit(M / OpMask.size());
39031         if (UsedInputs.isAllOnes()) {
39032           ResolveKnownZeros = false;
39033           break;
39034         }
39035       }
39036     }
39037     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
39038                                       ResolveKnownZeros);
39039 
39040     Mask = OpMask;
39041     Ops.append(OpInputs.begin(), OpInputs.end());
39042   } else {
39043     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
39044 
39045     // Add the inputs to the Ops list, avoiding duplicates.
39046     Ops.append(SrcOps.begin(), SrcOps.end());
39047 
39048     auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
39049       // Attempt to find an existing match.
39050       SDValue InputBC = peekThroughBitcasts(Input);
39051       for (int i = 0, e = Ops.size(); i < e; ++i)
39052         if (InputBC == peekThroughBitcasts(Ops[i]))
39053           return i;
39054       // Match failed - should we replace an existing Op?
39055       if (InsertionPoint >= 0) {
39056         Ops[InsertionPoint] = Input;
39057         return InsertionPoint;
39058       }
39059       // Add to the end of the Ops list.
39060       Ops.push_back(Input);
39061       return Ops.size() - 1;
39062     };
39063 
39064     SmallVector<int, 2> OpInputIdx;
39065     for (SDValue OpInput : OpInputs)
39066       OpInputIdx.push_back(
39067           AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
39068 
39069     assert(((RootMask.size() > OpMask.size() &&
39070              RootMask.size() % OpMask.size() == 0) ||
39071             (OpMask.size() > RootMask.size() &&
39072              OpMask.size() % RootMask.size() == 0) ||
39073             OpMask.size() == RootMask.size()) &&
39074            "The smaller number of elements must divide the larger.");
39075 
39076     // This function can be performance-critical, so we rely on the power-of-2
39077     // knowledge that we have about the mask sizes to replace div/rem ops with
39078     // bit-masks and shifts.
39079     assert(llvm::has_single_bit<uint32_t>(RootMask.size()) &&
39080            "Non-power-of-2 shuffle mask sizes");
39081     assert(llvm::has_single_bit<uint32_t>(OpMask.size()) &&
39082            "Non-power-of-2 shuffle mask sizes");
39083     unsigned RootMaskSizeLog2 = llvm::countr_zero(RootMask.size());
39084     unsigned OpMaskSizeLog2 = llvm::countr_zero(OpMask.size());
39085 
39086     unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
39087     unsigned RootRatio =
39088         std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
39089     unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
39090     assert((RootRatio == 1 || OpRatio == 1) &&
39091            "Must not have a ratio for both incoming and op masks!");
39092 
39093     assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
39094     assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
39095     assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
39096     unsigned RootRatioLog2 = llvm::countr_zero(RootRatio);
39097     unsigned OpRatioLog2 = llvm::countr_zero(OpRatio);
39098 
39099     Mask.resize(MaskWidth, SM_SentinelUndef);
39100 
39101     // Merge this shuffle operation's mask into our accumulated mask. Note that
39102     // this shuffle's mask will be the first applied to the input, followed by
39103     // the root mask to get us all the way to the root value arrangement. The
39104     // reason for this order is that we are recursing up the operation chain.
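          // In the simple case where both masks have the same width (RootRatio and
          // OpRatio are both 1), the loop below effectively computes
          // Mask[i] = OpMask[RootMask[i]] for elements that reference the current
          // source op, rebased onto the matching entry in Ops.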
39105     for (unsigned i = 0; i < MaskWidth; ++i) {
39106       unsigned RootIdx = i >> RootRatioLog2;
39107       if (RootMask[RootIdx] < 0) {
39108         // This is a zero or undef lane, we're done.
39109         Mask[i] = RootMask[RootIdx];
39110         continue;
39111       }
39112 
39113       unsigned RootMaskedIdx =
39114           RootRatio == 1
39115               ? RootMask[RootIdx]
39116               : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
39117 
39118       // Just insert the scaled root mask value if it references an input other
39119       // than the SrcOp we're currently inserting.
39120       if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
39121           (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
39122         Mask[i] = RootMaskedIdx;
39123         continue;
39124       }
39125 
39126       RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
39127       unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
39128       if (OpMask[OpIdx] < 0) {
39129         // The incoming lanes are zero or undef; it doesn't matter which ones we
39130         // are using.
39131         Mask[i] = OpMask[OpIdx];
39132         continue;
39133       }
39134 
39135       // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
39136       unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
39137                                           : (OpMask[OpIdx] << OpRatioLog2) +
39138                                                 (RootMaskedIdx & (OpRatio - 1));
39139 
39140       OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
39141       int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
39142       assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
39143       OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
39144 
39145       Mask[i] = OpMaskedIdx;
39146     }
39147   }
39148 
39149   // Peek through vector widenings and set out of bounds mask indices to undef.
39150   // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
39151   for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
39152     SDValue &Op = Ops[I];
39153     if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
39154         isNullConstant(Op.getOperand(2))) {
39155       Op = Op.getOperand(1);
39156       unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
39157       int Lo = I * Mask.size();
39158       int Hi = (I + 1) * Mask.size();
39159       int NewHi = Lo + (Mask.size() / Scale);
39160       for (int &M : Mask) {
39161         if (Lo <= M && NewHi <= M && M < Hi)
39162           M = SM_SentinelUndef;
39163       }
39164     }
39165   }
39166 
39167   // Peek through any free extract_subvector nodes back to root size.
39168   for (SDValue &Op : Ops)
39169     while (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39170            (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39171            isNullConstant(Op.getOperand(1)))
39172       Op = Op.getOperand(0);
39173 
39174   // Remove unused/repeated shuffle source ops.
39175   resolveTargetShuffleInputsAndMask(Ops, Mask);
39176 
39177   // Handle the all undef/zero/ones cases early.
39178   if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
39179     return DAG.getUNDEF(RootVT);
39180   if (all_of(Mask, [](int Idx) { return Idx < 0; }))
39181     return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
39182   if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
39183       !llvm::is_contained(Mask, SM_SentinelZero))
39184     return getOnesVector(RootVT, DAG, SDLoc(Root));
39185 
39186   assert(!Ops.empty() && "Shuffle with no inputs detected");
39187   HasVariableMask |= IsOpVariableMask;
39188 
39189   // Update the list of shuffle nodes that have been combined so far.
39190   SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
39191                                                 SrcNodes.end());
39192   CombinedNodes.push_back(Op.getNode());
39193 
39194   // See if we can recurse into each shuffle source op (if it's a target
39195   // shuffle). A source op should generally only be combined if it either has
39196   // a single use (i.e. the current Op) or all of its users have already been
39197   // combined; otherwise we can still combine, but we should avoid generating
39198   // variable shuffles to limit constant pool bloat.
39199   // Don't recurse if we already have more source ops than we can combine in
39200   // the remaining recursion depth.
39201   if (Ops.size() < (MaxDepth - Depth)) {
39202     for (int i = 0, e = Ops.size(); i < e; ++i) {
39203       // For empty roots, we need to resolve zeroable elements before combining
39204       // them with other shuffles.
39205       SmallVector<int, 64> ResolvedMask = Mask;
39206       if (EmptyRoot)
39207         resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
39208       bool AllowCrossLaneVar = false;
39209       bool AllowPerLaneVar = false;
39210       if (Ops[i].getNode()->hasOneUse() ||
39211           SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
39212         AllowCrossLaneVar = AllowVariableCrossLaneMask;
39213         AllowPerLaneVar = AllowVariablePerLaneMask;
39214       }
39215       if (SDValue Res = combineX86ShufflesRecursively(
39216               Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
39217               HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
39218               Subtarget))
39219         return Res;
39220     }
39221   }
39222 
39223   // Attempt to constant fold all of the constant source ops.
39224   if (SDValue Cst = combineX86ShufflesConstants(
39225           Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
39226     return Cst;
39227 
39228   // If constant folding failed and we only have constants, then we have
39229   // multiple uses of them in a single non-variable shuffle - just bail.
39230   if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
39231         APInt UndefElts;
39232         SmallVector<APInt> RawBits;
39233         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39234         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
39235                                              RawBits);
39236       })) {
39237     return SDValue();
39238   }
39239 
39240   // Canonicalize the combined shuffle mask chain with horizontal ops.
39241   // NOTE: This will update the Ops and Mask.
39242   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
39243           Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
39244     return DAG.getBitcast(RootVT, HOp);
39245 
39246   // Try to refine our inputs given our knowledge of target shuffle mask.
39247   for (auto I : enumerate(Ops)) {
39248     int OpIdx = I.index();
39249     SDValue &Op = I.value();
39250 
39251     // What range of shuffle mask element values results in picking from Op?
39252     int Lo = OpIdx * Mask.size();
39253     int Hi = Lo + Mask.size();
39254 
39255     // Which elements of Op do we demand, given the mask's granularity?
39256     APInt OpDemandedElts(Mask.size(), 0);
39257     for (int MaskElt : Mask) {
39258       if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
39259         int OpEltIdx = MaskElt - Lo;
39260         OpDemandedElts.setBit(OpEltIdx);
39261       }
39262     }
39263 
39264     // Is the shuffle result smaller than the root?
39265     if (Op.getValueSizeInBits() < RootSizeInBits) {
39266       // We padded the mask with undefs. But we now need to undo that.
39267       unsigned NumExpectedVectorElts = Mask.size();
39268       unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
39269       unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
39270       assert(!OpDemandedElts.extractBits(
39271                  NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
39272              "Demanding the virtual undef widening padding?");
39273       OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
39274     }
39275 
39276     // The Op itself may be of different VT, so we need to scale the mask.
39277     unsigned NumOpElts = Op.getValueType().getVectorNumElements();
39278     APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
39279 
39280     // Can this operand be simplified any further, given its demanded elements?
39281     if (SDValue NewOp =
39282             DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
39283                 Op, OpScaledDemandedElts, DAG))
39284       Op = NewOp;
39285   }
39286   // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
39287 
39288   // Widen any subvector shuffle inputs we've collected.
39289   // TODO: Remove this to avoid generating temporary nodes, we should only
39290   // widen once combineX86ShuffleChain has found a match.
39291   if (any_of(Ops, [RootSizeInBits](SDValue Op) {
39292         return Op.getValueSizeInBits() < RootSizeInBits;
39293       })) {
39294     for (SDValue &Op : Ops)
39295       if (Op.getValueSizeInBits() < RootSizeInBits)
39296         Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
39297                             RootSizeInBits);
39298     // Re-resolve - we might have repeated subvector sources.
39299     resolveTargetShuffleInputsAndMask(Ops, Mask);
39300   }
39301 
39302   // We can only combine unary and binary shuffle mask cases.
39303   if (Ops.size() <= 2) {
39304     // Minor canonicalization of the accumulated shuffle mask to make it easier
39305     // to match below. All this does is detect masks with sequential pairs of
39306     // elements, and shrink them to the half-width mask. It does this in a loop
39307     // so it will reduce the size of the mask to the minimal width mask which
39308     // performs an equivalent shuffle.
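          // Illustrative example (hypothetical mask): [0, 1, 6, 7] over 32-bit
          // elements pairs up as (0,1) and (6,7), so it widens to [0, 3] over
          // 64-bit elements; [0, 3] cannot be widened further, so the loop stops.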
39309     while (Mask.size() > 1) {
39310       SmallVector<int, 64> WidenedMask;
39311       if (!canWidenShuffleElements(Mask, WidenedMask))
39312         break;
39313       Mask = std::move(WidenedMask);
39314     }
39315 
39316     // Canonicalization of binary shuffle masks to improve pattern matching by
39317     // commuting the inputs.
39318     if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
39319       ShuffleVectorSDNode::commuteMask(Mask);
39320       std::swap(Ops[0], Ops[1]);
39321     }
39322 
39323     // Try to combine into a single shuffle instruction.
39324     if (SDValue Shuffle = combineX86ShuffleChain(
39325             Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39326             AllowVariablePerLaneMask, DAG, Subtarget))
39327       return Shuffle;
39328 
39329     // If all the operands come from the same larger vector, fall through and
39330     // try to use combineX86ShuffleChainWithExtract.
39331     SDValue LHS = peekThroughBitcasts(Ops.front());
39332     SDValue RHS = peekThroughBitcasts(Ops.back());
39333     if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
39334         (RootSizeInBits / Mask.size()) != 64 ||
39335         LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39336         RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39337         LHS.getOperand(0) != RHS.getOperand(0))
39338       return SDValue();
39339   }
39340 
39341   // If that failed and any input is extracted then try to combine as a
39342   // shuffle with the larger type.
39343   return combineX86ShuffleChainWithExtract(
39344       Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39345       AllowVariablePerLaneMask, DAG, Subtarget);
39346 }
39347 
39348 /// Helper entry wrapper to combineX86ShufflesRecursively.
39349 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
39350                                              const X86Subtarget &Subtarget) {
39351   return combineX86ShufflesRecursively(
39352       {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
39353       /*HasVarMask*/ false,
39354       /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
39355       Subtarget);
39356 }
39357 
39358 /// Get the PSHUF-style mask from a PSHUF node.
39359 ///
39360 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
39361 /// PSHUF-style masks that can be reused with such instructions.
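      /// For example (hypothetical node): a PSHUFLW with a [3,2,1,0] word permute
      /// yields the 4-element mask {3, 2, 1, 0}, while for PSHUFHW the elements
      /// (all in the 4-7 range) are rebased to 0-3 before being returned.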
39362 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
39363   MVT VT = N.getSimpleValueType();
39364   SmallVector<int, 4> Mask;
39365   SmallVector<SDValue, 2> Ops;
39366   bool HaveMask =
39367       getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
39368   (void)HaveMask;
39369   assert(HaveMask);
39370 
39371   // If we have more than 128 bits, only the low 128 bits of the shuffle mask
39372   // matter. Check that the upper masks are repeats and remove them.
39373   if (VT.getSizeInBits() > 128) {
39374     int LaneElts = 128 / VT.getScalarSizeInBits();
39375 #ifndef NDEBUG
39376     for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
39377       for (int j = 0; j < LaneElts; ++j)
39378         assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
39379                "Mask doesn't repeat in high 128-bit lanes!");
39380 #endif
39381     Mask.resize(LaneElts);
39382   }
39383 
39384   switch (N.getOpcode()) {
39385   case X86ISD::PSHUFD:
39386     return Mask;
39387   case X86ISD::PSHUFLW:
39388     Mask.resize(4);
39389     return Mask;
39390   case X86ISD::PSHUFHW:
39391     Mask.erase(Mask.begin(), Mask.begin() + 4);
39392     for (int &M : Mask)
39393       M -= 4;
39394     return Mask;
39395   default:
39396     llvm_unreachable("No valid shuffle instruction found!");
39397   }
39398 }
39399 
39400 /// Search for a combinable shuffle across a chain ending in pshufd.
39401 ///
39402 /// We walk up the chain and look for a combinable shuffle, skipping over
39403 /// shuffles that we could hoist this shuffle's transformation past without
39404 /// altering anything.
39405 static SDValue
39406 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
39407                              SelectionDAG &DAG) {
39408   assert(N.getOpcode() == X86ISD::PSHUFD &&
39409          "Called with something other than an x86 128-bit half shuffle!");
39410   SDLoc DL(N);
39411 
39412   // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
39413   // of the shuffles in the chain so that we can form a fresh chain to replace
39414   // this one.
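        // For example (hypothetical chain): a pshufd whose mask keeps the low two
        // dwords in place and only permutes the high two can be hoisted past a
        // pshuflw (which only rearranges the low four words), letting it merge with
        // a combinable shuffle further up the chain.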
39415   SmallVector<SDValue, 8> Chain;
39416   SDValue V = N.getOperand(0);
39417   for (; V.hasOneUse(); V = V.getOperand(0)) {
39418     switch (V.getOpcode()) {
39419     default:
39420       return SDValue(); // Nothing combined!
39421 
39422     case ISD::BITCAST:
39423       // Skip bitcasts, as we always know the type for the target-specific
39424       // instructions.
39425       continue;
39426 
39427     case X86ISD::PSHUFD:
39428       // Found another dword shuffle.
39429       break;
39430 
39431     case X86ISD::PSHUFLW:
39432       // Check that the low words (being shuffled) are the identity in the
39433       // dword shuffle, and the high words are self-contained.
39434       if (Mask[0] != 0 || Mask[1] != 1 ||
39435           !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
39436         return SDValue();
39437 
39438       Chain.push_back(V);
39439       continue;
39440 
39441     case X86ISD::PSHUFHW:
39442       // Check that the high words (being shuffled) are the identity in the
39443       // dword shuffle, and the low words are self-contained.
39444       if (Mask[2] != 2 || Mask[3] != 3 ||
39445           !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
39446         return SDValue();
39447 
39448       Chain.push_back(V);
39449       continue;
39450 
39451     case X86ISD::UNPCKL:
39452     case X86ISD::UNPCKH:
39453       // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
39454       // shuffle into a preceding word shuffle.
39455       if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
39456           V.getSimpleValueType().getVectorElementType() != MVT::i16)
39457         return SDValue();
39458 
39459       // Search for a half-shuffle which we can combine with.
39460       unsigned CombineOp =
39461           V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
39462       if (V.getOperand(0) != V.getOperand(1) ||
39463           !V->isOnlyUserOf(V.getOperand(0).getNode()))
39464         return SDValue();
39465       Chain.push_back(V);
39466       V = V.getOperand(0);
39467       do {
39468         switch (V.getOpcode()) {
39469         default:
39470           return SDValue(); // Nothing to combine.
39471 
39472         case X86ISD::PSHUFLW:
39473         case X86ISD::PSHUFHW:
39474           if (V.getOpcode() == CombineOp)
39475             break;
39476 
39477           Chain.push_back(V);
39478 
39479           [[fallthrough]];
39480         case ISD::BITCAST:
39481           V = V.getOperand(0);
39482           continue;
39483         }
39484         break;
39485       } while (V.hasOneUse());
39486       break;
39487     }
39488     // Break out of the loop if we break out of the switch.
39489     break;
39490   }
39491 
39492   if (!V.hasOneUse())
39493     // We fell out of the loop without finding a viable combining instruction.
39494     return SDValue();
39495 
39496   // Merge this node's mask and our incoming mask.
39497   SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
39498   for (int &M : Mask)
39499     M = VMask[M];
39500   V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
39501                   getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
39502 
39503   // Rebuild the chain around this new shuffle.
39504   while (!Chain.empty()) {
39505     SDValue W = Chain.pop_back_val();
39506 
39507     if (V.getValueType() != W.getOperand(0).getValueType())
39508       V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
39509 
39510     switch (W.getOpcode()) {
39511     default:
39512       llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
39513 
39514     case X86ISD::UNPCKL:
39515     case X86ISD::UNPCKH:
39516       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
39517       break;
39518 
39519     case X86ISD::PSHUFD:
39520     case X86ISD::PSHUFLW:
39521     case X86ISD::PSHUFHW:
39522       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
39523       break;
39524     }
39525   }
39526   if (V.getValueType() != N.getValueType())
39527     V = DAG.getBitcast(N.getValueType(), V);
39528 
39529   // Return the new chain to replace N.
39530   return V;
39531 }
39532 
39533 // Attempt to commute shufps LHS loads:
39534 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
39535 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
39536                                       SelectionDAG &DAG) {
39537   // TODO: Add vXf64 support.
39538   if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
39539     return SDValue();
39540 
39541   // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
39542   auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
39543     if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
39544       return SDValue();
39545     SDValue N0 = V.getOperand(0);
39546     SDValue N1 = V.getOperand(1);
39547     unsigned Imm = V.getConstantOperandVal(2);
39548     const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
39549     if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
39550         X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
39551       return SDValue();
39552     Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
39553     return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
39554                        DAG.getTargetConstant(Imm, DL, MVT::i8));
39555   };
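        // Note that swapping the operands also exchanges the two 64-bit halves of
        // the shufps result (the low two 2-bit immediate fields select from the
        // first operand, the high two from the second), e.g. a hypothetical
        // Imm = 0xE4 becomes 0x4E. The callers below compensate by XORing their
        // own immediates so the overall value stays the same.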
39556 
39557   switch (N.getOpcode()) {
39558   case X86ISD::VPERMILPI:
39559     if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
39560       unsigned Imm = N.getConstantOperandVal(1);
39561       return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
39562                          DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39563     }
39564     break;
39565   case X86ISD::SHUFP: {
39566     SDValue N0 = N.getOperand(0);
39567     SDValue N1 = N.getOperand(1);
39568     unsigned Imm = N.getConstantOperandVal(2);
39569     if (N0 == N1) {
39570       if (SDValue NewSHUFP = commuteSHUFP(N, N0))
39571         return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
39572                            DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39573     } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
39574       return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
39575                          DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
39576     } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
39577       return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
39578                          DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
39579     }
39580     break;
39581   }
39582   }
39583 
39584   return SDValue();
39585 }
39586 
39587 // TODO - move this to TLI like isBinOp?
39588 static bool isUnaryOp(unsigned Opcode) {
39589   switch (Opcode) {
39590   case ISD::CTLZ:
39591   case ISD::CTTZ:
39592   case ISD::CTPOP:
39593     return true;
39594   }
39595   return false;
39596 }
39597 
39598 // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
39599 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
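      // For instance (hypothetical DAG): pshufd(and(x, constant build_vector)) can
      // become and(pshufd(x), pshufd(constant)); the shuffle of the constant folds
      // away, so no extra shuffles are created and the moved shuffle may combine
      // further with the sources.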
39600 static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG,
39601                                          const SDLoc &DL) {
39602   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39603   EVT ShuffleVT = N.getValueType();
39604 
39605   auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
39606     // AllZeros/AllOnes constants are freely shuffled and will peek through
39607     // bitcasts. Other constant build vectors do not peek through bitcasts. Only
39608     // merge with target shuffles if it has one use so shuffle combining is
39609     // likely to kick in. Shuffles of splats are expected to be removed.
39610     return ISD::isBuildVectorAllOnes(Op.getNode()) ||
39611            ISD::isBuildVectorAllZeros(Op.getNode()) ||
39612            ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
39613            ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
39614            (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op->hasOneUse()) ||
39615            (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
39616            (FoldLoad && isShuffleFoldableLoad(Op)) ||
39617            DAG.isSplatValue(Op, /*AllowUndefs*/ false);
39618   };
39619   auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
39620     // Ensure we only shuffle whole vector src elements, unless it's a logical
39621     // binop, where we can more aggressively move shuffles from dst to src.
39622     return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
39623            BinOp == X86ISD::ANDNP ||
39624            (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
39625   };
39626 
39627   unsigned Opc = N.getOpcode();
39628   switch (Opc) {
39629   // Unary and Unary+Permute Shuffles.
39630   case X86ISD::PSHUFB: {
39631     // Don't merge PSHUFB if it contains zero'd elements.
39632     SmallVector<int> Mask;
39633     SmallVector<SDValue> Ops;
39634     if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
39635                               Mask))
39636       break;
39637     [[fallthrough]];
39638   }
39639   case X86ISD::VBROADCAST:
39640   case X86ISD::MOVDDUP:
39641   case X86ISD::PSHUFD:
39642   case X86ISD::PSHUFHW:
39643   case X86ISD::PSHUFLW:
39644   case X86ISD::VPERMI:
39645   case X86ISD::VPERMILPI: {
39646     if (N.getOperand(0).getValueType() == ShuffleVT &&
39647         N->isOnlyUserOf(N.getOperand(0).getNode())) {
39648       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39649       unsigned SrcOpcode = N0.getOpcode();
39650       if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
39651         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39652         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39653         if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
39654             IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
39655           SDValue LHS, RHS;
39656           Op00 = DAG.getBitcast(ShuffleVT, Op00);
39657           Op01 = DAG.getBitcast(ShuffleVT, Op01);
39658           if (N.getNumOperands() == 2) {
39659             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
39660             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
39661           } else {
39662             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
39663             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
39664           }
39665           EVT OpVT = N0.getValueType();
39666           return DAG.getBitcast(ShuffleVT,
39667                                 DAG.getNode(SrcOpcode, DL, OpVT,
39668                                             DAG.getBitcast(OpVT, LHS),
39669                                             DAG.getBitcast(OpVT, RHS)));
39670         }
39671       }
39672     }
39673     break;
39674   }
39675   // Binary and Binary+Permute Shuffles.
39676   case X86ISD::INSERTPS: {
39677     // Don't merge INSERTPS if it contains zero'd elements.
39678     unsigned InsertPSMask = N.getConstantOperandVal(2);
39679     unsigned ZeroMask = InsertPSMask & 0xF;
39680     if (ZeroMask != 0)
39681       break;
39682     [[fallthrough]];
39683   }
39684   case X86ISD::MOVSD:
39685   case X86ISD::MOVSS:
39686   case X86ISD::BLENDI:
39687   case X86ISD::SHUFP:
39688   case X86ISD::UNPCKH:
39689   case X86ISD::UNPCKL: {
39690     if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
39691         N->isOnlyUserOf(N.getOperand(1).getNode())) {
39692       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39693       SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
39694       unsigned SrcOpcode = N0.getOpcode();
39695       if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39696           N0.getValueType() == N1.getValueType() &&
39697           IsSafeToMoveShuffle(N0, SrcOpcode) &&
39698           IsSafeToMoveShuffle(N1, SrcOpcode)) {
39699         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39700         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39701         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39702         SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
39703         // Ensure the total number of shuffles doesn't increase by folding this
39704         // shuffle through to the source ops.
39705         if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
39706              (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
39707             ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
39708              (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
39709           SDValue LHS, RHS;
39710           Op00 = DAG.getBitcast(ShuffleVT, Op00);
39711           Op10 = DAG.getBitcast(ShuffleVT, Op10);
39712           Op01 = DAG.getBitcast(ShuffleVT, Op01);
39713           Op11 = DAG.getBitcast(ShuffleVT, Op11);
39714           if (N.getNumOperands() == 3) {
39715             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39716             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
39717           } else {
39718             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39719             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
39720           }
39721           EVT OpVT = N0.getValueType();
39722           return DAG.getBitcast(ShuffleVT,
39723                                 DAG.getNode(SrcOpcode, DL, OpVT,
39724                                             DAG.getBitcast(OpVT, LHS),
39725                                             DAG.getBitcast(OpVT, RHS)));
39726         }
39727       }
39728       if (isUnaryOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39729           N0.getValueType() == N1.getValueType() &&
39730           IsSafeToMoveShuffle(N0, SrcOpcode) &&
39731           IsSafeToMoveShuffle(N1, SrcOpcode)) {
39732         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39733         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39734         SDValue Res;
39735         Op00 = DAG.getBitcast(ShuffleVT, Op00);
39736         Op10 = DAG.getBitcast(ShuffleVT, Op10);
39737         if (N.getNumOperands() == 3) {
39738           Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39739         } else {
39740           Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39741         }
39742         EVT OpVT = N0.getValueType();
39743         return DAG.getBitcast(
39744             ShuffleVT,
39745             DAG.getNode(SrcOpcode, DL, OpVT, DAG.getBitcast(OpVT, Res)));
39746       }
39747     }
39748     break;
39749   }
39750   }
39751   return SDValue();
39752 }
39753 
39754 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
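      /// For example (hypothetical DAG): vperm2x128(movddup(x), movddup(y)) can be
      /// rewritten as movddup(vperm2x128(x, y)), leaving a single repeated op above
      /// the lane shuffle.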
39755 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
39756                                                       SelectionDAG &DAG,
39757                                                       const SDLoc &DL) {
39758   assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
39759 
39760   MVT VT = V.getSimpleValueType();
39761   SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
39762   SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
39763   unsigned SrcOpc0 = Src0.getOpcode();
39764   unsigned SrcOpc1 = Src1.getOpcode();
39765   EVT SrcVT0 = Src0.getValueType();
39766   EVT SrcVT1 = Src1.getValueType();
39767 
39768   if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
39769     return SDValue();
39770 
39771   switch (SrcOpc0) {
39772   case X86ISD::MOVDDUP: {
39773     SDValue LHS = Src0.getOperand(0);
39774     SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39775     SDValue Res =
39776         DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
39777     Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
39778     return DAG.getBitcast(VT, Res);
39779   }
39780   case X86ISD::VPERMILPI:
39781     // TODO: Handle v4f64 permutes with different low/high lane masks.
39782     if (SrcVT0 == MVT::v4f64) {
39783       uint64_t Mask = Src0.getConstantOperandVal(1);
39784       if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
39785         break;
39786     }
39787     [[fallthrough]];
39788   case X86ISD::VSHLI:
39789   case X86ISD::VSRLI:
39790   case X86ISD::VSRAI:
39791   case X86ISD::PSHUFD:
39792     if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
39793       SDValue LHS = Src0.getOperand(0);
39794       SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39795       SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
39796                                 V.getOperand(2));
39797       Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
39798       return DAG.getBitcast(VT, Res);
39799     }
39800     break;
39801   }
39802 
39803   return SDValue();
39804 }
39805 
39806 /// Try to combine x86 target specific shuffles.
39807 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
39808                                     TargetLowering::DAGCombinerInfo &DCI,
39809                                     const X86Subtarget &Subtarget) {
39810   SDLoc DL(N);
39811   MVT VT = N.getSimpleValueType();
39812   SmallVector<int, 4> Mask;
39813   unsigned Opcode = N.getOpcode();
39814 
39815   if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
39816     return R;
39817 
39818   // Handle specific target shuffles.
39819   switch (Opcode) {
39820   case X86ISD::MOVDDUP: {
39821     SDValue Src = N.getOperand(0);
39822     // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
39823     if (VT == MVT::v2f64 && Src.hasOneUse() &&
39824         ISD::isNormalLoad(Src.getNode())) {
39825       LoadSDNode *LN = cast<LoadSDNode>(Src);
39826       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
39827         SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
39828         DCI.CombineTo(N.getNode(), Movddup);
39829         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
39830         DCI.recursivelyDeleteUnusedNodes(LN);
39831         return N; // Return N so it doesn't get rechecked!
39832       }
39833     }
39834 
39835     return SDValue();
39836   }
39837   case X86ISD::VBROADCAST: {
39838     SDValue Src = N.getOperand(0);
39839     SDValue BC = peekThroughBitcasts(Src);
39840     EVT SrcVT = Src.getValueType();
39841     EVT BCVT = BC.getValueType();
39842 
39843     // If broadcasting from another shuffle, attempt to simplify it.
39844     // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
39845     if (isTargetShuffle(BC.getOpcode()) &&
39846         VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
39847       unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
39848       SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
39849                                         SM_SentinelUndef);
39850       for (unsigned i = 0; i != Scale; ++i)
39851         DemandedMask[i] = i;
39852       if (SDValue Res = combineX86ShufflesRecursively(
39853               {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
39854               X86::MaxShuffleCombineDepth,
39855               /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
39856               /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
39857         return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39858                            DAG.getBitcast(SrcVT, Res));
39859     }
39860 
39861     // broadcast(bitcast(src)) -> bitcast(broadcast(src))
39862     // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
39863     if (Src.getOpcode() == ISD::BITCAST &&
39864         SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
39865         DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
39866         FixedVectorType::isValidElementType(
39867             BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
39868       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
39869                                    VT.getVectorNumElements());
39870       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39871     }
39872 
39873     // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
39874     // If we're re-broadcasting a smaller type then broadcast with that type and
39875     // bitcast.
39876     // TODO: Do this for any splat?
39877     if (Src.getOpcode() == ISD::BITCAST &&
39878         (BC.getOpcode() == X86ISD::VBROADCAST ||
39879          BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
39880         (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
39881         (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
39882       MVT NewVT =
39883           MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
39884                            VT.getSizeInBits() / BCVT.getScalarSizeInBits());
39885       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39886     }
39887 
39888     // Reduce broadcast source vector to lowest 128-bits.
39889     if (SrcVT.getSizeInBits() > 128)
39890       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39891                          extract128BitVector(Src, 0, DAG, DL));
39892 
39893     // broadcast(scalar_to_vector(x)) -> broadcast(x).
39894     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39895         Src.getValueType().getScalarType() == Src.getOperand(0).getValueType())
39896       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39897 
39898     // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
39899     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39900         isNullConstant(Src.getOperand(1)) &&
39901         Src.getValueType() ==
39902             Src.getOperand(0).getValueType().getScalarType() &&
39903         DAG.getTargetLoweringInfo().isTypeLegal(
39904             Src.getOperand(0).getValueType()))
39905       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39906 
39907     // Share broadcast with the longest vector and extract low subvector (free).
39908     // Ensure the user broadcasts the same SDValue, not just the same node.
39909     for (SDNode *User : Src->uses())
39910       if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
39911           Src == User->getOperand(0) &&
39912           User->getValueSizeInBits(0).getFixedValue() >
39913               VT.getFixedSizeInBits()) {
39914         return extractSubVector(SDValue(User, 0), 0, DAG, DL,
39915                                 VT.getSizeInBits());
39916       }
39917 
39918     // vbroadcast(scalarload X) -> vbroadcast_load X
39919     // For float loads, extract other uses of the scalar from the broadcast.
39920     if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
39921         ISD::isNormalLoad(Src.getNode())) {
39922       LoadSDNode *LN = cast<LoadSDNode>(Src);
39923       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39924       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39925       SDValue BcastLd =
39926           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
39927                                   LN->getMemoryVT(), LN->getMemOperand());
39928       // If the load value is used only by N, replace it via CombineTo N.
39929       bool NoReplaceExtract = Src.hasOneUse();
39930       DCI.CombineTo(N.getNode(), BcastLd);
39931       if (NoReplaceExtract) {
39932         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39933         DCI.recursivelyDeleteUnusedNodes(LN);
39934       } else {
39935         SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
39936                                   DAG.getIntPtrConstant(0, DL));
39937         DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
39938       }
39939       return N; // Return N so it doesn't get rechecked!
39940     }
39941 
39942     // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
39943     // i16. So shrink it ourselves if we can make a broadcast_load.
39944     if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
39945         Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
39946       assert(Subtarget.hasAVX2() && "Expected AVX2");
39947       SDValue TruncIn = Src.getOperand(0);
39948       // If this is a truncate of a non-extending load, we can just narrow it
39949       // to use a broadcast_load.
39950       // use a broadcast_load.
39951       if (ISD::isNormalLoad(TruncIn.getNode())) {
39952         LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
39953         // Unless it's volatile or atomic.
39954         if (LN->isSimple()) {
39955           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39956           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39957           SDValue BcastLd = DAG.getMemIntrinsicNode(
39958               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
39959               LN->getPointerInfo(), LN->getOriginalAlign(),
39960               LN->getMemOperand()->getFlags());
39961           DCI.CombineTo(N.getNode(), BcastLd);
39962           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39963           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
39964           return N; // Return N so it doesn't get rechecked!
39965         }
39966       }
39967 
39968       // If this is a truncate of an i16 extload, we can directly replace it.
39969       if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
39970           ISD::isEXTLoad(Src.getOperand(0).getNode())) {
39971         LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
39972         if (LN->getMemoryVT().getSizeInBits() == 16) {
39973           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39974           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39975           SDValue BcastLd =
39976               DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
39977                                       LN->getMemoryVT(), LN->getMemOperand());
39978           DCI.CombineTo(N.getNode(), BcastLd);
39979           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39980           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
39981           return N; // Return N so it doesn't get rechecked!
39982         }
39983       }
39984 
39985       // If this is a truncate of a load that has been shifted right, we can
39986       // offset the pointer and use a narrower load.
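            // For example (hypothetical node): for (i16 (trunc (srl (load i32 p),
            // 16))) the shift amount is a multiple of 16 bits, so the same value
            // can be produced by a 16-bit broadcast_load from p + 2.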
39987       if (TruncIn.getOpcode() == ISD::SRL &&
39988           TruncIn.getOperand(0).hasOneUse() &&
39989           isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
39990           ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
39991         LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
39992         unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
39993         // Make sure the shift amount and the load size are divisible by 16.
39994         // Don't do this if the load is volatile or atomic.
39995         if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
39996             LN->isSimple()) {
39997           unsigned Offset = ShiftAmt / 8;
39998           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39999           SDValue Ptr = DAG.getMemBasePlusOffset(
40000               LN->getBasePtr(), TypeSize::getFixed(Offset), DL);
40001           SDValue Ops[] = { LN->getChain(), Ptr };
40002           SDValue BcastLd = DAG.getMemIntrinsicNode(
40003               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
40004               LN->getPointerInfo().getWithOffset(Offset),
40005               LN->getOriginalAlign(),
40006               LN->getMemOperand()->getFlags());
40007           DCI.CombineTo(N.getNode(), BcastLd);
40008           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40009           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
40010           return N; // Return N so it doesn't get rechecked!
40011         }
40012       }
40013     }
40014 
40015     // vbroadcast(vzload X) -> vbroadcast_load X
40016     if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
40017       MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
40018       if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
40019         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40020         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
40021         SDValue BcastLd =
40022             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
40023                                     LN->getMemoryVT(), LN->getMemOperand());
40024         DCI.CombineTo(N.getNode(), BcastLd);
40025         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40026         DCI.recursivelyDeleteUnusedNodes(LN);
40027         return N; // Return N so it doesn't get rechecked!
40028       }
40029     }
40030 
40031     // vbroadcast(vector load X) -> vbroadcast_load
40032     if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
40033          SrcVT == MVT::v4i32) &&
40034         Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
40035       LoadSDNode *LN = cast<LoadSDNode>(Src);
40036       // Unless the load is volatile or atomic.
40037       if (LN->isSimple()) {
40038         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40039         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40040         SDValue BcastLd = DAG.getMemIntrinsicNode(
40041             X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
40042             LN->getPointerInfo(), LN->getOriginalAlign(),
40043             LN->getMemOperand()->getFlags());
40044         DCI.CombineTo(N.getNode(), BcastLd);
40045         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40046         DCI.recursivelyDeleteUnusedNodes(LN);
40047         return N; // Return N so it doesn't get rechecked!
40048       }
40049     }
40050 
40051     return SDValue();
40052   }
40053   case X86ISD::VZEXT_MOVL: {
40054     SDValue N0 = N.getOperand(0);
40055 
40056     // If this is a vzmovl of a full vector load, replace it with a vzload,
40057     // unless the load is volatile.
40058     if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
40059       auto *LN = cast<LoadSDNode>(N0);
40060       if (SDValue VZLoad =
40061               narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
40062         DCI.CombineTo(N.getNode(), VZLoad);
40063         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40064         DCI.recursivelyDeleteUnusedNodes(LN);
40065         return N;
40066       }
40067     }
40068 
40069     // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the
40070     // broadcast and can just use a VZEXT_LOAD.
40071     // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
40072     if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
40073       auto *LN = cast<MemSDNode>(N0);
40074       if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
40075         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40076         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40077         SDValue VZLoad =
40078             DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
40079                                     LN->getMemoryVT(), LN->getMemOperand());
40080         DCI.CombineTo(N.getNode(), VZLoad);
40081         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40082         DCI.recursivelyDeleteUnusedNodes(LN);
40083         return N;
40084       }
40085     }
40086 
40087     // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
40088     // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
40089     // if the upper bits of the i64 are zero.
40090     if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
40091         N0.getOperand(0).hasOneUse() &&
40092         N0.getOperand(0).getValueType() == MVT::i64) {
40093       SDValue In = N0.getOperand(0);
40094       APInt Mask = APInt::getHighBitsSet(64, 32);
40095       if (DAG.MaskedValueIsZero(In, Mask)) {
40096         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
40097         MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
40098         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
40099         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
40100         return DAG.getBitcast(VT, Movl);
40101       }
40102     }
40103 
40104     // Load a scalar integer constant directly to XMM instead of transferring an
40105     // immediate value from GPR.
40106     // vzext_movl (scalar_to_vector C) --> load [C,0...]
40107     if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
40108       if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
40109         // Create a vector constant - scalar constant followed by zeros.
40110         EVT ScalarVT = N0.getOperand(0).getValueType();
40111         Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
40112         unsigned NumElts = VT.getVectorNumElements();
40113         Constant *Zero = ConstantInt::getNullValue(ScalarTy);
40114         SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
40115         ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
40116 
40117         // Load the vector constant from constant pool.
40118         MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
40119         SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
40120         MachinePointerInfo MPI =
40121             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
40122         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
40123         return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
40124                            MachineMemOperand::MOLoad);
40125       }
40126     }
40127 
40128     // Pull subvector inserts into undef through VZEXT_MOVL by making it an
40129     // insert into a zero vector. This helps get VZEXT_MOVL closer to
40130     // scalar_to_vectors where 256/512 are canonicalized to an insert and a
40131     // 128-bit scalar_to_vector. This reduces the number of isel patterns.
40132     if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
40133       SDValue V = peekThroughOneUseBitcasts(N0);
40134 
40135       if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
40136           isNullConstant(V.getOperand(2))) {
40137         SDValue In = V.getOperand(1);
40138         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
40139                                      In.getValueSizeInBits() /
40140                                          VT.getScalarSizeInBits());
40141         In = DAG.getBitcast(SubVT, In);
40142         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
40143         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
40144                            getZeroVector(VT, Subtarget, DAG, DL), Movl,
40145                            V.getOperand(2));
40146       }
40147     }
40148 
40149     return SDValue();
40150   }
40151   case X86ISD::BLENDI: {
40152     SDValue N0 = N.getOperand(0);
40153     SDValue N1 = N.getOperand(1);
40154 
40155     // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
40156     // TODO: Handle MVT::v16i16 repeated blend mask.
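          // Worked example (hypothetical types): a v4f64 blend with mask 0b0101
          // over bitcasts of v8f32 values becomes a v8f32 blend with each mask bit
          // repeated per 32-bit half, i.e. mask 0b00110011.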
40157     if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
40158         N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
40159       MVT SrcVT = N0.getOperand(0).getSimpleValueType();
40160       if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
40161           SrcVT.getScalarSizeInBits() >= 32) {
40162         unsigned BlendMask = N.getConstantOperandVal(2);
40163         unsigned Size = VT.getVectorNumElements();
40164         unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
40165         BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
40166         return DAG.getBitcast(
40167             VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
40168                             N1.getOperand(0),
40169                             DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
40170       }
40171     }
40172     return SDValue();
40173   }
40174   case X86ISD::SHUFP: {
40175     // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
40176     // This is a more relaxed shuffle combiner that can ignore oneuse limits.
40177     // TODO: Support types other than v4f32.
40178     if (VT == MVT::v4f32) {
40179       bool Updated = false;
40180       SmallVector<int> Mask;
40181       SmallVector<SDValue> Ops;
40182       if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
40183           Ops.size() == 2) {
40184         for (int i = 0; i != 2; ++i) {
40185           SmallVector<SDValue> SubOps;
40186           SmallVector<int> SubMask, SubScaledMask;
40187           SDValue Sub = peekThroughBitcasts(Ops[i]);
40188           // TODO: Scaling might be easier if we specify the demanded elts.
40189           if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
40190               scaleShuffleElements(SubMask, 4, SubScaledMask) &&
40191               SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
40192             int Ofs = i * 2;
40193             Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
40194             Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
40195             Ops[i] = DAG.getBitcast(VT, SubOps[0]);
40196             Updated = true;
40197           }
40198         }
40199       }
40200       if (Updated) {
40201         for (int &M : Mask)
40202           M %= 4;
40203         Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
40204         return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
40205       }
40206     }
40207     return SDValue();
40208   }
40209   case X86ISD::VPERMI: {
40210     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
40211     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
40212     SDValue N0 = N.getOperand(0);
40213     SDValue N1 = N.getOperand(1);
40214     unsigned EltSizeInBits = VT.getScalarSizeInBits();
40215     if (N0.getOpcode() == ISD::BITCAST &&
40216         N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
40217       SDValue Src = N0.getOperand(0);
40218       EVT SrcVT = Src.getValueType();
40219       SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
40220       return DAG.getBitcast(VT, Res);
40221     }
40222     return SDValue();
40223   }
40224   case X86ISD::VPERM2X128: {
40225     // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
40226     SDValue LHS = N->getOperand(0);
40227     SDValue RHS = N->getOperand(1);
40228     if (LHS.getOpcode() == ISD::BITCAST &&
40229         (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
40230       EVT SrcVT = LHS.getOperand(0).getValueType();
40231       if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
40232         return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
40233                                               DAG.getBitcast(SrcVT, LHS),
40234                                               DAG.getBitcast(SrcVT, RHS),
40235                                               N->getOperand(2)));
40236       }
40237     }
40238 
40239     // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
40240     if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
40241       return Res;
40242 
40243     // Fold vperm2x128 subvector shuffle with an inner concat pattern.
40244     // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
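          // The low nibble of the immediate selects the 128-bit half placed in the
          // low lane and the high nibble the half placed in the high lane (values
          // 0-1 index into the first source, 2-3 into the second), so a
          // hypothetical immediate of 0x20 would yield concat(X, Z) here.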
40245     auto FindSubVector128 = [&](unsigned Idx) {
40246       if (Idx > 3)
40247         return SDValue();
40248       SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
40249       SmallVector<SDValue> SubOps;
40250       if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
40251         return SubOps[Idx & 1];
40252       unsigned NumElts = Src.getValueType().getVectorNumElements();
40253       if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
40254           Src.getOperand(1).getValueSizeInBits() == 128 &&
40255           Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
40256         return Src.getOperand(1);
40257       }
40258       return SDValue();
40259     };
40260     unsigned Imm = N.getConstantOperandVal(2);
40261     if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
40262       if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
40263         MVT SubVT = VT.getHalfNumVectorElementsVT();
40264         SubLo = DAG.getBitcast(SubVT, SubLo);
40265         SubHi = DAG.getBitcast(SubVT, SubHi);
40266         return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
40267       }
40268     }
40269     return SDValue();
40270   }
40271   case X86ISD::PSHUFD:
40272   case X86ISD::PSHUFLW:
40273   case X86ISD::PSHUFHW: {
40274     SDValue N0 = N.getOperand(0);
40275     SDValue N1 = N.getOperand(1);
40276     if (N0->hasOneUse()) {
40277       SDValue V = peekThroughOneUseBitcasts(N0);
40278       switch (V.getOpcode()) {
40279       case X86ISD::VSHL:
40280       case X86ISD::VSRL:
40281       case X86ISD::VSRA:
40282       case X86ISD::VSHLI:
40283       case X86ISD::VSRLI:
40284       case X86ISD::VSRAI:
40285       case X86ISD::VROTLI:
40286       case X86ISD::VROTRI: {
40287         MVT InnerVT = V.getSimpleValueType();
40288         if (InnerVT.getScalarSizeInBits() <= VT.getScalarSizeInBits()) {
40289           SDValue Res = DAG.getNode(Opcode, DL, VT,
40290                                     DAG.getBitcast(VT, V.getOperand(0)), N1);
40291           Res = DAG.getBitcast(InnerVT, Res);
40292           Res = DAG.getNode(V.getOpcode(), DL, InnerVT, Res, V.getOperand(1));
40293           return DAG.getBitcast(VT, Res);
40294         }
40295         break;
40296       }
40297       }
40298     }
40299 
40300     Mask = getPSHUFShuffleMask(N);
40301     assert(Mask.size() == 4);
40302     break;
40303   }
40304   case X86ISD::MOVSD:
40305   case X86ISD::MOVSH:
40306   case X86ISD::MOVSS: {
40307     SDValue N0 = N.getOperand(0);
40308     SDValue N1 = N.getOperand(1);
40309 
40310     // Canonicalize scalar FPOps:
40311     // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
40312     // If commutable, allow OP(N1[0], N0[0]).
40313     unsigned Opcode1 = N1.getOpcode();
40314     if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
40315         Opcode1 == ISD::FDIV) {
40316       SDValue N10 = N1.getOperand(0);
40317       SDValue N11 = N1.getOperand(1);
40318       if (N10 == N0 ||
40319           (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
40320         if (N10 != N0)
40321           std::swap(N10, N11);
40322         MVT SVT = VT.getVectorElementType();
40323         SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
40324         N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
40325         N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
40326         SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
40327         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
40328         return DAG.getNode(Opcode, DL, VT, N0, SclVec);
40329       }
40330     }
40331 
40332     return SDValue();
40333   }
40334   case X86ISD::INSERTPS: {
40335     assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
40336     SDValue Op0 = N.getOperand(0);
40337     SDValue Op1 = N.getOperand(1);
40338     unsigned InsertPSMask = N.getConstantOperandVal(2);
40339     unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
40340     unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
40341     unsigned ZeroMask = InsertPSMask & 0xF;
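    // INSERTPS immediate layout: bits[7:6] pick the source element of Op1,
    // bits[5:4] pick the destination element in Op0, and bits[3:0] zero out
    // individual result elements.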
40342 
40343     // If we zero out all elements from Op0 then we don't need to reference it.
40344     if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
40345       return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
40346                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40347 
40348     // If we zero out the element from Op1 then we don't need to reference it.
40349     if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
40350       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40351                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40352 
40353     // Attempt to merge insertps Op1 with an inner target shuffle node.
40354     SmallVector<int, 8> TargetMask1;
40355     SmallVector<SDValue, 2> Ops1;
40356     APInt KnownUndef1, KnownZero1;
40357     if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
40358                                      KnownZero1)) {
40359       if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
40360         // Zero/UNDEF insertion - zero out element and remove dependency.
40361         InsertPSMask |= (1u << DstIdx);
40362         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40363                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40364       }
40365       // Update insertps mask srcidx and reference the source input directly.
40366       int M = TargetMask1[SrcIdx];
40367       assert(0 <= M && M < 8 && "Shuffle index out of range");
40368       InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
40369       Op1 = Ops1[M < 4 ? 0 : 1];
40370       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40371                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40372     }
40373 
40374     // Attempt to merge insertps Op0 with an inner target shuffle node.
40375     SmallVector<int, 8> TargetMask0;
40376     SmallVector<SDValue, 2> Ops0;
40377     APInt KnownUndef0, KnownZero0;
40378     if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
40379                                      KnownZero0)) {
40380       bool Updated = false;
40381       bool UseInput00 = false;
40382       bool UseInput01 = false;
40383       for (int i = 0; i != 4; ++i) {
40384         if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
40385           // No change if element is already zero or the inserted element.
40386           continue;
40387         }
40388 
40389         if (KnownUndef0[i] || KnownZero0[i]) {
40390           // If the target mask is undef/zero then we must zero the element.
40391           InsertPSMask |= (1u << i);
40392           Updated = true;
40393           continue;
40394         }
40395 
40396         // The source element must be in place (same index in either input).
40397         int M = TargetMask0[i];
40398         if (M != i && M != (i + 4))
40399           return SDValue();
40400 
40401         // Determine which inputs of the target shuffle we're using.
40402         UseInput00 |= (0 <= M && M < 4);
40403         UseInput01 |= (4 <= M);
40404       }
40405 
40406       // If we're not using both inputs of the target shuffle then use the
40407       // referenced input directly.
40408       if (UseInput00 && !UseInput01) {
40409         Updated = true;
40410         Op0 = Ops0[0];
40411       } else if (!UseInput00 && UseInput01) {
40412         Updated = true;
40413         Op0 = Ops0[1];
40414       }
40415 
40416       if (Updated)
40417         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40418                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40419     }
40420 
40421     // If we're inserting an element from a vbroadcast load, fold the
40422     // load into the X86insertps instruction. We need to convert the scalar
40423     // load to a vector and clear the source lane of the INSERTPS control.
40424     if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
40425       auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
40426       if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
40427         SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
40428                                    MemIntr->getBasePtr(),
40429                                    MemIntr->getMemOperand());
40430         SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
40431                            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
40432                                        Load),
40433                            DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
40434         DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
40435         return Insert;
40436       }
40437     }
40438 
40439     return SDValue();
40440   }
40441   default:
40442     return SDValue();
40443   }
40444 
40445   // Nuke no-op shuffles that show up after combining.
40446   if (isNoopShuffleMask(Mask))
40447     return N.getOperand(0);
40448 
40449   // Look for simplifications involving one or two shuffle instructions.
40450   SDValue V = N.getOperand(0);
40451   switch (N.getOpcode()) {
40452   default:
40453     break;
40454   case X86ISD::PSHUFLW:
40455   case X86ISD::PSHUFHW:
40456     assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
40457 
40458     // See if this reduces to a PSHUFD which is no more expensive and can
40459     // combine with more operations. Note that it has to at least flip the
40460     // dwords as otherwise it would have been removed as a no-op.
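    // e.g. a PSHUFLW <2,3,0,1> swaps the two low dwords, which is the same as
    // a PSHUFD <1,0,2,3> of the vector viewed as v4i32 (and similarly for
    // PSHUFHW on the upper dwords).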
40461     if (ArrayRef<int>(Mask).equals({2, 3, 0, 1})) {
40462       int DMask[] = {0, 1, 2, 3};
40463       int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
40464       DMask[DOffset + 0] = DOffset + 1;
40465       DMask[DOffset + 1] = DOffset + 0;
40466       MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
40467       V = DAG.getBitcast(DVT, V);
40468       V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
40469                       getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
40470       return DAG.getBitcast(VT, V);
40471     }
40472 
40473     // Look for shuffle patterns which can be implemented as a single unpack.
40474     // FIXME: This doesn't handle the location of the PSHUFD generically, and
40475     // only works when we have a PSHUFD followed by two half-shuffles.
40476     if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
40477         (V.getOpcode() == X86ISD::PSHUFLW ||
40478          V.getOpcode() == X86ISD::PSHUFHW) &&
40479         V.getOpcode() != N.getOpcode() &&
40480         V.hasOneUse() && V.getOperand(0).hasOneUse()) {
40481       SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
40482       if (D.getOpcode() == X86ISD::PSHUFD) {
40483         SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
40484         SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
40485         int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40486         int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40487         int WordMask[8];
40488         for (int i = 0; i < 4; ++i) {
40489           WordMask[i + NOffset] = Mask[i] + NOffset;
40490           WordMask[i + VOffset] = VMask[i] + VOffset;
40491         }
40492         // Map the word mask through the DWord mask.
40493         int MappedMask[8];
40494         for (int i = 0; i < 8; ++i)
40495           MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
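        // If the mapped mask duplicates each low (or high) word pairwise, the
        // whole PSHUF chain is equivalent to a single UNPCKLWD (or UNPCKHWD)
        // of the original input with itself.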
40496         if (ArrayRef<int>(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
40497             ArrayRef<int>(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
40498           // We can replace all three shuffles with an unpack.
40499           V = DAG.getBitcast(VT, D.getOperand(0));
40500           return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
40501                                                 : X86ISD::UNPCKH,
40502                              DL, VT, V, V);
40503         }
40504       }
40505     }
40506 
40507     break;
40508 
40509   case X86ISD::PSHUFD:
40510     if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
40511       return NewN;
40512 
40513     break;
40514   }
40515 
40516   return SDValue();
40517 }
40518 
40519 /// Checks if the shuffle mask takes subsequent elements
40520 /// alternately from two vectors.
40521 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
40522 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
40523 
40524   int ParitySrc[2] = {-1, -1};
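  // ParitySrc[0]/[1] record which source feeds the even/odd result lanes; the
  // mask qualifies only if every position i uses element i of some source,
  // with all even lanes coming from one source and all odd lanes from the
  // other.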
40525   unsigned Size = Mask.size();
40526   for (unsigned i = 0; i != Size; ++i) {
40527     int M = Mask[i];
40528     if (M < 0)
40529       continue;
40530 
40531     // Make sure we are using the matching element from the input.
40532     if ((M % Size) != i)
40533       return false;
40534 
40535     // Make sure we use the same input for all elements of the same parity.
40536     int Src = M / Size;
40537     if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
40538       return false;
40539     ParitySrc[i % 2] = Src;
40540   }
40541 
40542   // Make sure each input is used.
40543   if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
40544     return false;
40545 
40546   Op0Even = ParitySrc[0] == 0;
40547   return true;
40548 }
40549 
40550 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB(SUBADD)
40551 /// operation. If true is returned then the operands of the ADDSUB(SUBADD)
40552 /// operation are written to the parameters \p Opnd0 and \p Opnd1.
40553 ///
40554 /// We combine the shuffle to ADDSUB(SUBADD) directly on the abstract vector
40555 /// shuffle nodes so it is easier to generically match. We also insert dummy
40556 /// vector shuffle nodes for the operands which explicitly discard the lanes
40557 /// which are unused by this operation, so that the rest of the combiner can
40558 /// see that they're unused.
40559 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
40560                              SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
40561                              bool &IsSubAdd) {
40562 
40563   EVT VT = N->getValueType(0);
40564   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40565   if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
40566       !VT.getSimpleVT().isFloatingPoint())
40567     return false;
40568 
40569   // We only handle target-independent shuffles.
40570   // FIXME: It would be easy and harmless to use the target shuffle mask
40571   // extraction tool to support more.
40572   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40573     return false;
40574 
40575   SDValue V1 = N->getOperand(0);
40576   SDValue V2 = N->getOperand(1);
40577 
40578   // Make sure we have an FADD and an FSUB.
40579   if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
40580       (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
40581       V1.getOpcode() == V2.getOpcode())
40582     return false;
40583 
40584   // If there are other uses of these operations we can't fold them.
40585   if (!V1->hasOneUse() || !V2->hasOneUse())
40586     return false;
40587 
40588   // Ensure that both operations have the same operands. Note that we can
40589   // commute the FADD operands.
40590   SDValue LHS, RHS;
40591   if (V1.getOpcode() == ISD::FSUB) {
40592     LHS = V1->getOperand(0); RHS = V1->getOperand(1);
40593     if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
40594         (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
40595       return false;
40596   } else {
40597     assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
40598     LHS = V2->getOperand(0); RHS = V2->getOperand(1);
40599     if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
40600         (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
40601       return false;
40602   }
40603 
40604   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40605   bool Op0Even;
40606   if (!isAddSubOrSubAddMask(Mask, Op0Even))
40607     return false;
40608 
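  // ADDSUB subtracts in the even result lanes and adds in the odd ones;
  // SUBADD is the reverse.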
40609   // It's a subadd if the vector in the even parity is an FADD.
40610   IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
40611                      : V2->getOpcode() == ISD::FADD;
40612 
40613   Opnd0 = LHS;
40614   Opnd1 = RHS;
40615   return true;
40616 }
40617 
40618 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
40619 static SDValue combineShuffleToFMAddSub(SDNode *N,
40620                                         const X86Subtarget &Subtarget,
40621                                         SelectionDAG &DAG) {
40622   // We only handle target-independent shuffles.
40623   // FIXME: It would be easy and harmless to use the target shuffle mask
40624   // extraction tool to support more.
40625   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40626     return SDValue();
40627 
40628   MVT VT = N->getSimpleValueType(0);
40629   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40630   if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
40631     return SDValue();
40632 
40633   // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
40634   SDValue Op0 = N->getOperand(0);
40635   SDValue Op1 = N->getOperand(1);
40636   SDValue FMAdd = Op0, FMSub = Op1;
40637   if (FMSub.getOpcode() != X86ISD::FMSUB)
40638     std::swap(FMAdd, FMSub);
40639 
40640   if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
40641       FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
40642       FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
40643       FMAdd.getOperand(2) != FMSub.getOperand(2))
40644     return SDValue();
40645 
40646   // Check for correct shuffle mask.
40647   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40648   bool Op0Even;
40649   if (!isAddSubOrSubAddMask(Mask, Op0Even))
40650     return SDValue();
40651 
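  // FMADDSUB computes a*b-c in the even lanes and a*b+c in the odd lanes;
  // FMSUBADD is the reverse, hence the parity check below.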
40652   // FMAddSub takes the zeroth operand from the FMSub node.
40653   SDLoc DL(N);
40654   bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
40655   unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40656   return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
40657                      FMAdd.getOperand(2));
40658 }
40659 
40660 /// Try to combine a shuffle into a target-specific add-sub or
40661 /// mul-add-sub node.
40662 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
40663                                                 const X86Subtarget &Subtarget,
40664                                                 SelectionDAG &DAG) {
40665   if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
40666     return V;
40667 
40668   SDValue Opnd0, Opnd1;
40669   bool IsSubAdd;
40670   if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
40671     return SDValue();
40672 
40673   MVT VT = N->getSimpleValueType(0);
40674   SDLoc DL(N);
40675 
40676   // Try to generate X86ISD::FMADDSUB node here.
40677   SDValue Opnd2;
40678   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
40679     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40680     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
40681   }
40682 
40683   if (IsSubAdd)
40684     return SDValue();
40685 
40686   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
40687   // the ADDSUB idiom has been successfully recognized. There are no known
40688   // X86 targets with 512-bit ADDSUB instructions!
40689   if (VT.is512BitVector())
40690     return SDValue();
40691 
40692   // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
40693   // the ADDSUB idiom has been successfully recognized. There are no known
40694   // X86 targets with FP16 ADDSUB instructions!
40695   if (VT.getVectorElementType() == MVT::f16)
40696     return SDValue();
40697 
40698   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
40699 }
40700 
40701 // We are looking for a shuffle where both sources are concatenated with undef
40702 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
40703 // if we can express this as a single-source shuffle, that's preferable.
40704 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
40705                                            const X86Subtarget &Subtarget) {
40706   if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
40707     return SDValue();
40708 
40709   EVT VT = N->getValueType(0);
40710 
40711   // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
40712   if (!VT.is128BitVector() && !VT.is256BitVector())
40713     return SDValue();
40714 
40715   if (VT.getVectorElementType() != MVT::i32 &&
40716       VT.getVectorElementType() != MVT::i64 &&
40717       VT.getVectorElementType() != MVT::f32 &&
40718       VT.getVectorElementType() != MVT::f64)
40719     return SDValue();
40720 
40721   SDValue N0 = N->getOperand(0);
40722   SDValue N1 = N->getOperand(1);
40723 
40724   // Check that both sources are concats with undef.
40725   if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
40726       N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
40727       N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
40728       !N1.getOperand(1).isUndef())
40729     return SDValue();
40730 
40731   // Construct the new shuffle mask. Elements from the first source retain their
40732   // index, but elements from the second source no longer need to skip an undef.
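  // e.g. for a v8i32 shuffle, mask index 9 selected element 1 of t2 via the
  // second concat; within concat(t1, t2) that element sits at index
  // 5 == 9 - 8/2.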
40733   SmallVector<int, 8> Mask;
40734   int NumElts = VT.getVectorNumElements();
40735 
40736   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
40737   for (int Elt : SVOp->getMask())
40738     Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
40739 
40740   SDLoc DL(N);
40741   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
40742                                N1.getOperand(0));
40743   return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
40744 }
40745 
40746 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
40747 /// low half of each source vector and does not set any high half elements in
40748 /// the destination vector, narrow the shuffle to half its original size.
40749 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
40750   EVT VT = Shuf->getValueType(0);
40751   if (!DAG.getTargetLoweringInfo().isTypeLegal(Shuf->getValueType(0)))
40752     return SDValue();
40753   if (!VT.is256BitVector() && !VT.is512BitVector())
40754     return SDValue();
40755 
40756   // See if we can ignore all of the high elements of the shuffle.
40757   ArrayRef<int> Mask = Shuf->getMask();
40758   if (!isUndefUpperHalf(Mask))
40759     return SDValue();
40760 
40761   // Check if the shuffle mask accesses only the low half of each input vector
40762   // (half-index output is 0 or 2).
40763   int HalfIdx1, HalfIdx2;
40764   SmallVector<int, 8> HalfMask(Mask.size() / 2);
40765   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
40766       (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
40767     return SDValue();
40768 
40769   // Create a half-width shuffle to replace the unnecessarily wide shuffle.
40770   // The trick is knowing that all of the insert/extract are actually free
40771   // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
40772   // of narrow inputs into a narrow output, and that is always cheaper than
40773   // the wide shuffle that we started with.
40774   return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
40775                                Shuf->getOperand(1), HalfMask, HalfIdx1,
40776                                HalfIdx2, false, DAG, /*UseConcat*/ true);
40777 }
40778 
40779 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
40780                               TargetLowering::DAGCombinerInfo &DCI,
40781                               const X86Subtarget &Subtarget) {
40782   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
40783     if (SDValue V = narrowShuffle(Shuf, DAG))
40784       return V;
40785 
40786   // If we have legalized the vector types, look for blends of FADD and FSUB
40787   // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
40788   SDLoc dl(N);
40789   EVT VT = N->getValueType(0);
40790   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40791   if (TLI.isTypeLegal(VT) && !isSoftF16(VT, Subtarget))
40792     if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
40793       return AddSub;
40794 
40795   // Attempt to combine into a vector load/broadcast.
40796   if (SDValue LD = combineToConsecutiveLoads(
40797           VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
40798     return LD;
40799 
40800   // For AVX2, we sometimes want to combine
40801   // (vector_shuffle <mask> (concat_vectors t1, undef)
40802   //                        (concat_vectors t2, undef))
40803   // Into:
40804   // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
40805   // Since the latter can be efficiently lowered with VPERMD/VPERMQ
40806   if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
40807     return ShufConcat;
40808 
40809   if (isTargetShuffle(N->getOpcode())) {
40810     SDValue Op(N, 0);
40811     if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
40812       return Shuffle;
40813 
40814     // Try recursively combining arbitrary sequences of x86 shuffle
40815     // instructions into higher-order shuffles. We do this after combining
40816     // specific PSHUF instruction sequences into their minimal form so that we
40817     // can evaluate how many specialized shuffle instructions are involved in
40818     // a particular chain.
40819     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40820       return Res;
40821 
40822     // Simplify source operands based on shuffle mask.
40823     // TODO - merge this into combineX86ShufflesRecursively.
40824     APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
40825     if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
40826       return SDValue(N, 0);
40827 
40828     // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
40829     // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
40830     // Perform this after other shuffle combines to allow inner shuffles to be
40831     // combined away first.
40832     if (SDValue BinOp = canonicalizeShuffleWithOp(Op, DAG, dl))
40833       return BinOp;
40834   }
40835 
40836   return SDValue();
40837 }
40838 
40839 // Simplify variable target shuffle masks based on the demanded elements.
40840 // TODO: Handle DemandedBits in mask indices as well?
40841 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
40842     SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
40843     TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
40844   // If we're demanding all elements, don't bother trying to simplify the mask.
40845   unsigned NumElts = DemandedElts.getBitWidth();
40846   if (DemandedElts.isAllOnes())
40847     return false;
40848 
40849   SDValue Mask = Op.getOperand(MaskIndex);
40850   if (!Mask.hasOneUse())
40851     return false;
40852 
40853   // Attempt to generically simplify the variable shuffle mask.
40854   APInt MaskUndef, MaskZero;
40855   if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
40856                                  Depth + 1))
40857     return true;
40858 
40859   // Attempt to extract+simplify a (constant pool load) shuffle mask.
40860   // TODO: Support other types from getTargetShuffleMaskIndices?
40861   SDValue BC = peekThroughOneUseBitcasts(Mask);
40862   EVT BCVT = BC.getValueType();
40863   auto *Load = dyn_cast<LoadSDNode>(BC);
40864   if (!Load || !Load->getBasePtr().hasOneUse())
40865     return false;
40866 
40867   const Constant *C = getTargetConstantFromNode(Load);
40868   if (!C)
40869     return false;
40870 
40871   Type *CTy = C->getType();
40872   if (!CTy->isVectorTy() ||
40873       CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
40874     return false;
40875 
40876   // Handle scaling for i64 elements on 32-bit targets.
40877   unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
40878   if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
40879     return false;
40880   unsigned Scale = NumCstElts / NumElts;
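  // Scale == 2 covers i64 mask elements that were stored as pairs of i32
  // constants on 32-bit targets; constant entry i then belongs to mask
  // element i / Scale.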
40881 
40882   // Simplify mask if we have an undemanded element that is not undef.
40883   bool Simplified = false;
40884   SmallVector<Constant *, 32> ConstVecOps;
40885   for (unsigned i = 0; i != NumCstElts; ++i) {
40886     Constant *Elt = C->getAggregateElement(i);
40887     if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
40888       ConstVecOps.push_back(UndefValue::get(Elt->getType()));
40889       Simplified = true;
40890       continue;
40891     }
40892     ConstVecOps.push_back(Elt);
40893   }
40894   if (!Simplified)
40895     return false;
40896 
40897   // Generate new constant pool entry + legalize immediately for the load.
40898   SDLoc DL(Op);
40899   SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
40900   SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
40901   SDValue NewMask = TLO.DAG.getLoad(
40902       BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
40903       MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
40904       Load->getAlign());
40905   return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
40906 }
40907 
40908 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
40909     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
40910     TargetLoweringOpt &TLO, unsigned Depth) const {
40911   int NumElts = DemandedElts.getBitWidth();
40912   unsigned Opc = Op.getOpcode();
40913   EVT VT = Op.getValueType();
40914 
40915   // Handle special case opcodes.
40916   switch (Opc) {
40917   case X86ISD::PMULDQ:
40918   case X86ISD::PMULUDQ: {
40919     APInt LHSUndef, LHSZero;
40920     APInt RHSUndef, RHSZero;
40921     SDValue LHS = Op.getOperand(0);
40922     SDValue RHS = Op.getOperand(1);
40923     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
40924                                    Depth + 1))
40925       return true;
40926     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
40927                                    Depth + 1))
40928       return true;
40929     // Multiply by zero.
40930     KnownZero = LHSZero | RHSZero;
40931     break;
40932   }
40933   case X86ISD::VPMADDWD: {
40934     APInt LHSUndef, LHSZero;
40935     APInt RHSUndef, RHSZero;
40936     SDValue LHS = Op.getOperand(0);
40937     SDValue RHS = Op.getOperand(1);
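    // Each i32 result element is formed from two adjacent i16 elements of
    // each operand, so every demanded result element demands a pair of
    // source elements.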
40938     APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
40939 
40940     if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
40941                                    Depth + 1))
40942       return true;
40943     if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
40944                                    Depth + 1))
40945       return true;
40946 
40947     // TODO: Multiply by zero.
40948 
40949     // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
40950     APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
40951     if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
40952                                    Depth + 1))
40953       return true;
40954     APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
40955     if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
40956                                    Depth + 1))
40957       return true;
40958     break;
40959   }
40960   case X86ISD::PSADBW: {
40961     SDValue LHS = Op.getOperand(0);
40962     SDValue RHS = Op.getOperand(1);
40963     assert(VT.getScalarType() == MVT::i64 &&
40964            LHS.getValueType() == RHS.getValueType() &&
40965            LHS.getValueType().getScalarType() == MVT::i8 &&
40966            "Unexpected PSADBW types");
40967 
40968     // Aggressively peek through ops to get at the demanded elts.
40969     if (!DemandedElts.isAllOnes()) {
40970       unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
40971       APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
40972       SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
40973           LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
40974       SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
40975           RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
40976       if (NewLHS || NewRHS) {
40977         NewLHS = NewLHS ? NewLHS : LHS;
40978         NewRHS = NewRHS ? NewRHS : RHS;
40979         return TLO.CombineTo(
40980             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
40981       }
40982     }
40983     break;
40984   }
40985   case X86ISD::VSHL:
40986   case X86ISD::VSRL:
40987   case X86ISD::VSRA: {
40988     // We only need the bottom 64-bits of the (128-bit) shift amount.
40989     SDValue Amt = Op.getOperand(1);
40990     MVT AmtVT = Amt.getSimpleValueType();
40991     assert(AmtVT.is128BitVector() && "Unexpected value type");
40992 
40993     // If the shift amount is only ever used as an SSE shift amount then we
40994     // know that only the bottom 64-bits are ever used.
40995     bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
40996       unsigned UseOpc = Use->getOpcode();
40997       return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
40998               UseOpc == X86ISD::VSRA) &&
40999              Use->getOperand(0) != Amt;
41000     });
41001 
41002     APInt AmtUndef, AmtZero;
41003     unsigned NumAmtElts = AmtVT.getVectorNumElements();
41004     APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
41005     if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
41006                                    Depth + 1, AssumeSingleUse))
41007       return true;
41008     [[fallthrough]];
41009   }
41010   case X86ISD::VSHLI:
41011   case X86ISD::VSRLI:
41012   case X86ISD::VSRAI: {
41013     SDValue Src = Op.getOperand(0);
41014     APInt SrcUndef;
41015     if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
41016                                    Depth + 1))
41017       return true;
41018 
41019     // Fold shift(0,x) -> 0
41020     if (DemandedElts.isSubsetOf(KnownZero))
41021       return TLO.CombineTo(
41022           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41023 
41024     // Aggressively peek through ops to get at the demanded elts.
41025     if (!DemandedElts.isAllOnes())
41026       if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41027               Src, DemandedElts, TLO.DAG, Depth + 1))
41028         return TLO.CombineTo(
41029             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
41030     break;
41031   }
41032   case X86ISD::VPSHA:
41033   case X86ISD::VPSHL:
41034   case X86ISD::VSHLV:
41035   case X86ISD::VSRLV:
41036   case X86ISD::VSRAV: {
41037     APInt LHSUndef, LHSZero;
41038     APInt RHSUndef, RHSZero;
41039     SDValue LHS = Op.getOperand(0);
41040     SDValue RHS = Op.getOperand(1);
41041     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
41042                                    Depth + 1))
41043       return true;
41044 
41045     // Fold shift(0,x) -> 0
41046     if (DemandedElts.isSubsetOf(LHSZero))
41047       return TLO.CombineTo(
41048           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41049 
41050     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
41051                                    Depth + 1))
41052       return true;
41053 
41054     KnownZero = LHSZero;
41055     break;
41056   }
41057   case X86ISD::KSHIFTL: {
41058     SDValue Src = Op.getOperand(0);
41059     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41060     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41061     unsigned ShiftAmt = Amt->getZExtValue();
41062 
41063     if (ShiftAmt == 0)
41064       return TLO.CombineTo(Op, Src);
41065 
41066     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41067     // single shift.  We can do this if the bottom bits (which are shifted
41068     // out) are never demanded.
41069     if (Src.getOpcode() == X86ISD::KSHIFTR) {
41070       if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
41071         unsigned C1 = Src.getConstantOperandVal(1);
41072         unsigned NewOpc = X86ISD::KSHIFTL;
41073         int Diff = ShiftAmt - C1;
41074         if (Diff < 0) {
41075           Diff = -Diff;
41076           NewOpc = X86ISD::KSHIFTR;
41077         }
41078 
41079         SDLoc dl(Op);
41080         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41081         return TLO.CombineTo(
41082             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41083       }
41084     }
41085 
41086     APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
41087     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41088                                    Depth + 1))
41089       return true;
41090 
41091     KnownUndef <<= ShiftAmt;
41092     KnownZero <<= ShiftAmt;
41093     KnownZero.setLowBits(ShiftAmt);
41094     break;
41095   }
41096   case X86ISD::KSHIFTR: {
41097     SDValue Src = Op.getOperand(0);
41098     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41099     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41100     unsigned ShiftAmt = Amt->getZExtValue();
41101 
41102     if (ShiftAmt == 0)
41103       return TLO.CombineTo(Op, Src);
41104 
41105     // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
41106     // single shift.  We can do this if the top bits (which are shifted
41107     // out) are never demanded.
41108     if (Src.getOpcode() == X86ISD::KSHIFTL) {
41109       if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
41110         unsigned C1 = Src.getConstantOperandVal(1);
41111         unsigned NewOpc = X86ISD::KSHIFTR;
41112         int Diff = ShiftAmt - C1;
41113         if (Diff < 0) {
41114           Diff = -Diff;
41115           NewOpc = X86ISD::KSHIFTL;
41116         }
41117 
41118         SDLoc dl(Op);
41119         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41120         return TLO.CombineTo(
41121             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41122       }
41123     }
41124 
41125     APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
41126     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41127                                    Depth + 1))
41128       return true;
41129 
41130     KnownUndef.lshrInPlace(ShiftAmt);
41131     KnownZero.lshrInPlace(ShiftAmt);
41132     KnownZero.setHighBits(ShiftAmt);
41133     break;
41134   }
41135   case X86ISD::ANDNP: {
41136     // ANDNP = (~LHS & RHS);
41137     SDValue LHS = Op.getOperand(0);
41138     SDValue RHS = Op.getOperand(1);
41139 
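    // Since the result is ~LHS & RHS, bits of LHS only matter where RHS can
    // be nonzero and bits of RHS only matter where LHS is not all-ones; when
    // one operand has constant elements, use them to shrink what we demand
    // from the other operand.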
41140     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
41141       APInt UndefElts;
41142       SmallVector<APInt> EltBits;
41143       int NumElts = VT.getVectorNumElements();
41144       int EltSizeInBits = VT.getScalarSizeInBits();
41145       APInt OpBits = APInt::getAllOnes(EltSizeInBits);
41146       APInt OpElts = DemandedElts;
41147       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
41148                                         EltBits)) {
41149         OpBits.clearAllBits();
41150         OpElts.clearAllBits();
41151         for (int I = 0; I != NumElts; ++I) {
41152           if (!DemandedElts[I])
41153             continue;
41154           if (UndefElts[I]) {
41155             // We can't assume an undef src element gives an undef dst - the
41156             // other src might be zero.
41157             OpBits.setAllBits();
41158             OpElts.setBit(I);
41159           } else if ((Invert && !EltBits[I].isAllOnes()) ||
41160                      (!Invert && !EltBits[I].isZero())) {
41161             OpBits |= Invert ? ~EltBits[I] : EltBits[I];
41162             OpElts.setBit(I);
41163           }
41164         }
41165       }
41166       return std::make_pair(OpBits, OpElts);
41167     };
41168     APInt BitsLHS, EltsLHS;
41169     APInt BitsRHS, EltsRHS;
41170     std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
41171     std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
41172 
41173     APInt LHSUndef, LHSZero;
41174     APInt RHSUndef, RHSZero;
41175     if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
41176                                    Depth + 1))
41177       return true;
41178     if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
41179                                    Depth + 1))
41180       return true;
41181 
41182     if (!DemandedElts.isAllOnes()) {
41183       SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
41184                                                        TLO.DAG, Depth + 1);
41185       SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
41186                                                        TLO.DAG, Depth + 1);
41187       if (NewLHS || NewRHS) {
41188         NewLHS = NewLHS ? NewLHS : LHS;
41189         NewRHS = NewRHS ? NewRHS : RHS;
41190         return TLO.CombineTo(
41191             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
41192       }
41193     }
41194     break;
41195   }
41196   case X86ISD::CVTSI2P:
41197   case X86ISD::CVTUI2P: {
41198     SDValue Src = Op.getOperand(0);
41199     MVT SrcVT = Src.getSimpleValueType();
41200     APInt SrcUndef, SrcZero;
41201     APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41202     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41203                                    Depth + 1))
41204       return true;
41205     break;
41206   }
41207   case X86ISD::PACKSS:
41208   case X86ISD::PACKUS: {
41209     SDValue N0 = Op.getOperand(0);
41210     SDValue N1 = Op.getOperand(1);
41211 
41212     APInt DemandedLHS, DemandedRHS;
41213     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41214 
41215     APInt LHSUndef, LHSZero;
41216     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41217                                    Depth + 1))
41218       return true;
41219     APInt RHSUndef, RHSZero;
41220     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41221                                    Depth + 1))
41222       return true;
41223 
41224     // TODO - pass on known zero/undef.
41225 
41226     // Aggressively peek through ops to get at the demanded elts.
41227     // TODO - we should do this for all target/faux shuffles ops.
41228     if (!DemandedElts.isAllOnes()) {
41229       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41230                                                             TLO.DAG, Depth + 1);
41231       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41232                                                             TLO.DAG, Depth + 1);
41233       if (NewN0 || NewN1) {
41234         NewN0 = NewN0 ? NewN0 : N0;
41235         NewN1 = NewN1 ? NewN1 : N1;
41236         return TLO.CombineTo(Op,
41237                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41238       }
41239     }
41240     break;
41241   }
41242   case X86ISD::HADD:
41243   case X86ISD::HSUB:
41244   case X86ISD::FHADD:
41245   case X86ISD::FHSUB: {
41246     SDValue N0 = Op.getOperand(0);
41247     SDValue N1 = Op.getOperand(1);
41248 
41249     APInt DemandedLHS, DemandedRHS;
41250     getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41251 
41252     APInt LHSUndef, LHSZero;
41253     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41254                                    Depth + 1))
41255       return true;
41256     APInt RHSUndef, RHSZero;
41257     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41258                                    Depth + 1))
41259       return true;
41260 
41261     // TODO - pass on known zero/undef.
41262 
41263     // Aggressively peek through ops to get at the demanded elts.
41264     // TODO: Handle repeated operands.
41265     if (N0 != N1 && !DemandedElts.isAllOnes()) {
41266       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41267                                                             TLO.DAG, Depth + 1);
41268       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41269                                                             TLO.DAG, Depth + 1);
41270       if (NewN0 || NewN1) {
41271         NewN0 = NewN0 ? NewN0 : N0;
41272         NewN1 = NewN1 ? NewN1 : N1;
41273         return TLO.CombineTo(Op,
41274                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41275       }
41276     }
41277     break;
41278   }
41279   case X86ISD::VTRUNC:
41280   case X86ISD::VTRUNCS:
41281   case X86ISD::VTRUNCUS: {
41282     SDValue Src = Op.getOperand(0);
41283     MVT SrcVT = Src.getSimpleValueType();
41284     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41285     APInt SrcUndef, SrcZero;
41286     if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
41287                                    Depth + 1))
41288       return true;
41289     KnownZero = SrcZero.zextOrTrunc(NumElts);
41290     KnownUndef = SrcUndef.zextOrTrunc(NumElts);
41291     break;
41292   }
41293   case X86ISD::BLENDV: {
41294     APInt SelUndef, SelZero;
41295     if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
41296                                    SelZero, TLO, Depth + 1))
41297       return true;
41298 
41299     // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
41300     APInt LHSUndef, LHSZero;
41301     if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
41302                                    LHSZero, TLO, Depth + 1))
41303       return true;
41304 
41305     APInt RHSUndef, RHSZero;
41306     if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
41307                                    RHSZero, TLO, Depth + 1))
41308       return true;
41309 
41310     KnownZero = LHSZero & RHSZero;
41311     KnownUndef = LHSUndef & RHSUndef;
41312     break;
41313   }
41314   case X86ISD::VZEXT_MOVL: {
41315     // If upper demanded elements are already zero then we have nothing to do.
41316     SDValue Src = Op.getOperand(0);
41317     APInt DemandedUpperElts = DemandedElts;
41318     DemandedUpperElts.clearLowBits(1);
41319     if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
41320       return TLO.CombineTo(Op, Src);
41321     break;
41322   }
41323   case X86ISD::VBROADCAST: {
41324     SDValue Src = Op.getOperand(0);
41325     MVT SrcVT = Src.getSimpleValueType();
41326     if (!SrcVT.isVector())
41327       break;
41328     // Don't bother broadcasting if we just need the 0'th element.
41329     if (DemandedElts == 1) {
41330       if (Src.getValueType() != VT)
41331         Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
41332                              SDLoc(Op));
41333       return TLO.CombineTo(Op, Src);
41334     }
41335     APInt SrcUndef, SrcZero;
41336     APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
41337     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41338                                    Depth + 1))
41339       return true;
41340     // Aggressively peek through src to get at the demanded elt.
41341     // TODO - we should do this for all target/faux shuffles ops.
41342     if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41343             Src, SrcElts, TLO.DAG, Depth + 1))
41344       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
41345     break;
41346   }
41347   case X86ISD::VPERMV:
41348     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
41349                                                    Depth))
41350       return true;
41351     break;
41352   case X86ISD::PSHUFB:
41353   case X86ISD::VPERMV3:
41354   case X86ISD::VPERMILPV:
41355     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
41356                                                    Depth))
41357       return true;
41358     break;
41359   case X86ISD::VPPERM:
41360   case X86ISD::VPERMIL2:
41361     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
41362                                                    Depth))
41363       return true;
41364     break;
41365   }
41366 
41367   // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
41368   // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
41369   // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
41370   if ((VT.is256BitVector() || VT.is512BitVector()) &&
41371       DemandedElts.lshr(NumElts / 2) == 0) {
41372     unsigned SizeInBits = VT.getSizeInBits();
41373     unsigned ExtSizeInBits = SizeInBits / 2;
41374 
41375     // See if 512-bit ops only use the bottom 128-bits.
41376     if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
41377       ExtSizeInBits = SizeInBits / 4;
41378 
41379     switch (Opc) {
41380       // Scalar broadcast.
41381     case X86ISD::VBROADCAST: {
41382       SDLoc DL(Op);
41383       SDValue Src = Op.getOperand(0);
41384       if (Src.getValueSizeInBits() > ExtSizeInBits)
41385         Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
41386       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41387                                     ExtSizeInBits / VT.getScalarSizeInBits());
41388       SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
41389       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41390                                                TLO.DAG, DL, ExtSizeInBits));
41391     }
41392     case X86ISD::VBROADCAST_LOAD: {
41393       SDLoc DL(Op);
41394       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41395       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41396                                     ExtSizeInBits / VT.getScalarSizeInBits());
41397       SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
41398       SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
41399       SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
41400           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
41401           MemIntr->getMemOperand());
41402       TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41403                                            Bcst.getValue(1));
41404       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41405                                                TLO.DAG, DL, ExtSizeInBits));
41406     }
41407       // Subvector broadcast.
41408     case X86ISD::SUBV_BROADCAST_LOAD: {
41409       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41410       EVT MemVT = MemIntr->getMemoryVT();
41411       if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
41412         SDLoc DL(Op);
41413         SDValue Ld =
41414             TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
41415                             MemIntr->getBasePtr(), MemIntr->getMemOperand());
41416         TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41417                                              Ld.getValue(1));
41418         return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
41419                                                  TLO.DAG, DL, ExtSizeInBits));
41420       } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
41421         SDLoc DL(Op);
41422         EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41423                                       ExtSizeInBits / VT.getScalarSizeInBits());
41424         if (SDValue BcstLd =
41425                 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
41426           return TLO.CombineTo(Op,
41427                                insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
41428                                                TLO.DAG, DL, ExtSizeInBits));
41429       }
41430       break;
41431     }
41432       // Byte shifts by immediate.
41433     case X86ISD::VSHLDQ:
41434     case X86ISD::VSRLDQ:
41435       // Shift by uniform.
41436     case X86ISD::VSHL:
41437     case X86ISD::VSRL:
41438     case X86ISD::VSRA:
41439       // Shift by immediate.
41440     case X86ISD::VSHLI:
41441     case X86ISD::VSRLI:
41442     case X86ISD::VSRAI: {
41443       SDLoc DL(Op);
41444       SDValue Ext0 =
41445           extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
41446       SDValue ExtOp =
41447           TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
41448       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41449       SDValue Insert =
41450           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41451       return TLO.CombineTo(Op, Insert);
41452     }
41453     case X86ISD::VPERMI: {
41454       // Simplify PERMPD/PERMQ to extract_subvector.
41455       // TODO: This should be done in shuffle combining.
41456       if (VT == MVT::v4f64 || VT == MVT::v4i64) {
41457         SmallVector<int, 4> Mask;
41458         DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
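        // If the two demanded low elements select indices 2 and 3, the low
        // 128 bits of the result are simply the upper 128 bits of the source,
        // i.e. a subvector extract.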
41459         if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
41460           SDLoc DL(Op);
41461           SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
41462           SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41463           SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
41464           return TLO.CombineTo(Op, Insert);
41465         }
41466       }
41467       break;
41468     }
41469     case X86ISD::VPERM2X128: {
41470       // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
41471       SDLoc DL(Op);
41472       unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
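      // The low nibble of the immediate controls the low 128-bit result lane:
      // bit 3 zeroes it, bit 1 selects the source operand and bit 0 selects
      // which 128-bit half of that source to take.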
41473       if (LoMask & 0x8)
41474         return TLO.CombineTo(
41475             Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
41476       unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
41477       unsigned SrcIdx = (LoMask & 0x2) >> 1;
41478       SDValue ExtOp =
41479           extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
41480       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41481       SDValue Insert =
41482           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41483       return TLO.CombineTo(Op, Insert);
41484     }
41485       // Zero upper elements.
41486     case X86ISD::VZEXT_MOVL:
41487       // Target unary shuffles by immediate:
41488     case X86ISD::PSHUFD:
41489     case X86ISD::PSHUFLW:
41490     case X86ISD::PSHUFHW:
41491     case X86ISD::VPERMILPI:
41492       // (Non-Lane Crossing) Target Shuffles.
41493     case X86ISD::VPERMILPV:
41494     case X86ISD::VPERMIL2:
41495     case X86ISD::PSHUFB:
41496     case X86ISD::UNPCKL:
41497     case X86ISD::UNPCKH:
41498     case X86ISD::BLENDI:
41499       // Integer ops.
41500     case X86ISD::PACKSS:
41501     case X86ISD::PACKUS:
41502     case X86ISD::PCMPEQ:
41503     case X86ISD::PCMPGT:
41504     case X86ISD::PMULUDQ:
41505     case X86ISD::PMULDQ:
41506     case X86ISD::VSHLV:
41507     case X86ISD::VSRLV:
41508     case X86ISD::VSRAV:
41509       // Float ops.
41510     case X86ISD::FMAX:
41511     case X86ISD::FMIN:
41512     case X86ISD::FMAXC:
41513     case X86ISD::FMINC:
41514       // Horizontal Ops.
41515     case X86ISD::HADD:
41516     case X86ISD::HSUB:
41517     case X86ISD::FHADD:
41518     case X86ISD::FHSUB: {
41519       SDLoc DL(Op);
41520       SmallVector<SDValue, 4> Ops;
41521       for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
41522         SDValue SrcOp = Op.getOperand(i);
41523         EVT SrcVT = SrcOp.getValueType();
41524         assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
41525                "Unsupported vector size");
41526         Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
41527                                                           ExtSizeInBits)
41528                                        : SrcOp);
41529       }
41530       MVT ExtVT = VT.getSimpleVT();
41531       ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
41532                                ExtSizeInBits / ExtVT.getScalarSizeInBits());
41533       SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
41534       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41535       SDValue Insert =
41536           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41537       return TLO.CombineTo(Op, Insert);
41538     }
41539     }
41540   }
41541 
41542   // For splats, unless we *only* demand the 0'th element, stop attempts at
41543   // simplification here: we aren't going to improve things, and keeping the
41544   // splat is better than any potential shuffle.
41545   if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
41546     return false;
41547 
41548   // Get target/faux shuffle mask.
41549   APInt OpUndef, OpZero;
41550   SmallVector<int, 64> OpMask;
41551   SmallVector<SDValue, 2> OpInputs;
41552   if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
41553                               OpZero, TLO.DAG, Depth, false))
41554     return false;
41555 
41556   // Shuffle inputs must be the same size as the result.
41557   if (OpMask.size() != (unsigned)NumElts ||
41558       llvm::any_of(OpInputs, [VT](SDValue V) {
41559         return VT.getSizeInBits() != V.getValueSizeInBits() ||
41560                !V.getValueType().isVector();
41561       }))
41562     return false;
41563 
41564   KnownZero = OpZero;
41565   KnownUndef = OpUndef;
41566 
41567   // Check if shuffle mask can be simplified to undef/zero/identity.
41568   int NumSrcs = OpInputs.size();
41569   for (int i = 0; i != NumElts; ++i)
41570     if (!DemandedElts[i])
41571       OpMask[i] = SM_SentinelUndef;
41572 
41573   if (isUndefInRange(OpMask, 0, NumElts)) {
41574     KnownUndef.setAllBits();
41575     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
41576   }
41577   if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
41578     KnownZero.setAllBits();
41579     return TLO.CombineTo(
41580         Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41581   }
41582   for (int Src = 0; Src != NumSrcs; ++Src)
41583     if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
41584       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
41585 
41586   // Attempt to simplify inputs.
41587   for (int Src = 0; Src != NumSrcs; ++Src) {
41588     // TODO: Support inputs of different types.
41589     if (OpInputs[Src].getValueType() != VT)
41590       continue;
41591 
41592     int Lo = Src * NumElts;
41593     APInt SrcElts = APInt::getZero(NumElts);
41594     for (int i = 0; i != NumElts; ++i)
41595       if (DemandedElts[i]) {
41596         int M = OpMask[i] - Lo;
41597         if (0 <= M && M < NumElts)
41598           SrcElts.setBit(M);
41599       }
41600 
41601     // TODO - Propagate input undef/zero elts.
41602     APInt SrcUndef, SrcZero;
41603     if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
41604                                    TLO, Depth + 1))
41605       return true;
41606   }
41607 
41608   // If we don't demand all elements, then attempt to combine to a simpler
41609   // shuffle.
41610   // We need to convert the depth to something combineX86ShufflesRecursively
41611   // can handle - so pretend it's Depth == 0 again, and reduce the max depth
41612   // to match. This prevents combineX86ShuffleChain from returning a
41613   // combined shuffle that's the same as the original root, causing an
41614   // infinite loop.
41615   if (!DemandedElts.isAllOnes()) {
41616     assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
41617 
41618     SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
41619     for (int i = 0; i != NumElts; ++i)
41620       if (DemandedElts[i])
41621         DemandedMask[i] = i;
41622 
41623     SDValue NewShuffle = combineX86ShufflesRecursively(
41624         {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
41625         /*HasVarMask*/ false,
41626         /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
41627         Subtarget);
41628     if (NewShuffle)
41629       return TLO.CombineTo(Op, NewShuffle);
41630   }
41631 
41632   return false;
41633 }
41634 
41635 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
41636     SDValue Op, const APInt &OriginalDemandedBits,
41637     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
41638     unsigned Depth) const {
41639   EVT VT = Op.getValueType();
41640   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
41641   unsigned Opc = Op.getOpcode();
41642   switch(Opc) {
41643   case X86ISD::VTRUNC: {
41644     KnownBits KnownOp;
41645     SDValue Src = Op.getOperand(0);
41646     MVT SrcVT = Src.getSimpleValueType();
41647 
41648     // Simplify the input, using demanded bit information.
41649     APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
41650     APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
41651     if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
41652       return true;
41653     break;
41654   }
41655   case X86ISD::PMULDQ:
41656   case X86ISD::PMULUDQ: {
41657     // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
41658     KnownBits KnownLHS, KnownRHS;
41659     SDValue LHS = Op.getOperand(0);
41660     SDValue RHS = Op.getOperand(1);
41661 
41662     // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
41663     // FIXME: Can we bound this better?
41664     APInt DemandedMask = APInt::getLowBitsSet(64, 32);
41665     APInt DemandedMaskLHS = APInt::getAllOnes(64);
41666     APInt DemandedMaskRHS = APInt::getAllOnes(64);
41667 
41668     bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
41669     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
41670       DemandedMaskLHS = DemandedMask;
41671     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
41672       DemandedMaskRHS = DemandedMask;
41673 
41674     if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
41675                              KnownLHS, TLO, Depth + 1))
41676       return true;
41677     if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
41678                              KnownRHS, TLO, Depth + 1))
41679       return true;
41680 
41681     // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
41682     KnownRHS = KnownRHS.trunc(32);
41683     if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
41684         KnownRHS.getConstant().isOne()) {
41685       SDLoc DL(Op);
41686       SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
41687       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
41688     }
41689 
41690     // Aggressively peek through ops to get at the demanded low bits.
41691     SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
41692         LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41693     SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
41694         RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41695     if (DemandedLHS || DemandedRHS) {
41696       DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
41697       DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
41698       return TLO.CombineTo(
41699           Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
41700     }
41701     break;
41702   }
41703   case X86ISD::ANDNP: {
41704     KnownBits Known2;
41705     SDValue Op0 = Op.getOperand(0);
41706     SDValue Op1 = Op.getOperand(1);
41707 
41708     if (SimplifyDemandedBits(Op1, OriginalDemandedBits, OriginalDemandedElts,
41709                              Known, TLO, Depth + 1))
41710       return true;
41711     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41712 
41713     if (SimplifyDemandedBits(Op0, ~Known.Zero & OriginalDemandedBits,
41714                              OriginalDemandedElts, Known2, TLO, Depth + 1))
41715       return true;
41716     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
41717 
41718     // If the RHS is a constant, see if we can simplify it.
41719     if (ShrinkDemandedConstant(Op, ~Known2.One & OriginalDemandedBits,
41720                                OriginalDemandedElts, TLO))
41721       return true;
41722 
41723     // ANDNP = (~Op0 & Op1);
41724     Known.One &= Known2.Zero;
41725     Known.Zero |= Known2.One;
41726     break;
41727   }
41728   case X86ISD::VSHLI: {
41729     SDValue Op0 = Op.getOperand(0);
41730 
41731     unsigned ShAmt = Op.getConstantOperandVal(1);
41732     if (ShAmt >= BitWidth)
41733       break;
41734 
41735     APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
41736 
41737     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41738     // single shift.  We can do this if the bottom bits (which are shifted
41739     // out) are never demanded.
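          // For example, ((X >>u 4) << 2) becomes (X >>u 2): only the low 2 result
          // bits would differ (the single shift no longer clears them), and the
          // countr_zero() >= ShAmt check below guarantees those bits aren't demanded.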
41740     if (Op0.getOpcode() == X86ISD::VSRLI &&
41741         OriginalDemandedBits.countr_zero() >= ShAmt) {
41742       unsigned Shift2Amt = Op0.getConstantOperandVal(1);
41743       if (Shift2Amt < BitWidth) {
41744         int Diff = ShAmt - Shift2Amt;
41745         if (Diff == 0)
41746           return TLO.CombineTo(Op, Op0.getOperand(0));
41747 
41748         unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
41749         SDValue NewShift = TLO.DAG.getNode(
41750             NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
41751             TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
41752         return TLO.CombineTo(Op, NewShift);
41753       }
41754     }
41755 
41756     // If we are only demanding sign bits then we can use the shift source directly.
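          // e.g. with BitWidth=32, ShAmt=8 and only the top 16 bits demanded, an Op0
          // with at least 24 sign bits already matches the shifted value on every
          // demanded bit, so the shift can be skipped.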
41757     unsigned NumSignBits =
41758         TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
41759     unsigned UpperDemandedBits = BitWidth - OriginalDemandedBits.countr_zero();
41760     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
41761       return TLO.CombineTo(Op, Op0);
41762 
41763     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41764                              TLO, Depth + 1))
41765       return true;
41766 
41767     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41768     Known.Zero <<= ShAmt;
41769     Known.One <<= ShAmt;
41770 
41771     // Low bits known zero.
41772     Known.Zero.setLowBits(ShAmt);
41773     return false;
41774   }
41775   case X86ISD::VSRLI: {
41776     unsigned ShAmt = Op.getConstantOperandVal(1);
41777     if (ShAmt >= BitWidth)
41778       break;
41779 
41780     APInt DemandedMask = OriginalDemandedBits << ShAmt;
41781 
41782     if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
41783                              OriginalDemandedElts, Known, TLO, Depth + 1))
41784       return true;
41785 
41786     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41787     Known.Zero.lshrInPlace(ShAmt);
41788     Known.One.lshrInPlace(ShAmt);
41789 
41790     // High bits known zero.
41791     Known.Zero.setHighBits(ShAmt);
41792     return false;
41793   }
41794   case X86ISD::VSRAI: {
41795     SDValue Op0 = Op.getOperand(0);
41796     SDValue Op1 = Op.getOperand(1);
41797 
41798     unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
41799     if (ShAmt >= BitWidth)
41800       break;
41801 
41802     APInt DemandedMask = OriginalDemandedBits << ShAmt;
41803 
41804     // If we just want the sign bit then we don't need to shift it.
41805     if (OriginalDemandedBits.isSignMask())
41806       return TLO.CombineTo(Op, Op0);
41807 
41808     // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
41809     if (Op0.getOpcode() == X86ISD::VSHLI &&
41810         Op.getOperand(1) == Op0.getOperand(1)) {
41811       SDValue Op00 = Op0.getOperand(0);
41812       unsigned NumSignBits =
41813           TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
41814       if (ShAmt < NumSignBits)
41815         return TLO.CombineTo(Op, Op00);
41816     }
41817 
41818     // If any of the demanded bits are produced by the sign extension, we also
41819     // demand the input sign bit.
41820     if (OriginalDemandedBits.countl_zero() < ShAmt)
41821       DemandedMask.setSignBit();
41822 
41823     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41824                              TLO, Depth + 1))
41825       return true;
41826 
41827     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41828     Known.Zero.lshrInPlace(ShAmt);
41829     Known.One.lshrInPlace(ShAmt);
41830 
41831     // If the input sign bit is known to be zero, or if none of the top bits
41832     // are demanded, turn this into an unsigned shift right.
41833     if (Known.Zero[BitWidth - ShAmt - 1] ||
41834         OriginalDemandedBits.countl_zero() >= ShAmt)
41835       return TLO.CombineTo(
41836           Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
41837 
41838     // High bits are known one.
41839     if (Known.One[BitWidth - ShAmt - 1])
41840       Known.One.setHighBits(ShAmt);
41841     return false;
41842   }
41843   case X86ISD::BLENDV: {
41844     SDValue Sel = Op.getOperand(0);
41845     SDValue LHS = Op.getOperand(1);
41846     SDValue RHS = Op.getOperand(2);
41847 
41848     APInt SignMask = APInt::getSignMask(BitWidth);
41849     SDValue NewSel = SimplifyMultipleUseDemandedBits(
41850         Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
41851     SDValue NewLHS = SimplifyMultipleUseDemandedBits(
41852         LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41853     SDValue NewRHS = SimplifyMultipleUseDemandedBits(
41854         RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41855 
41856     if (NewSel || NewLHS || NewRHS) {
41857       NewSel = NewSel ? NewSel : Sel;
41858       NewLHS = NewLHS ? NewLHS : LHS;
41859       NewRHS = NewRHS ? NewRHS : RHS;
41860       return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
41861                                                NewSel, NewLHS, NewRHS));
41862     }
41863     break;
41864   }
41865   case X86ISD::PEXTRB:
41866   case X86ISD::PEXTRW: {
41867     SDValue Vec = Op.getOperand(0);
41868     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
41869     MVT VecVT = Vec.getSimpleValueType();
41870     unsigned NumVecElts = VecVT.getVectorNumElements();
41871 
41872     if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
41873       unsigned Idx = CIdx->getZExtValue();
41874       unsigned VecBitWidth = VecVT.getScalarSizeInBits();
41875 
41876       // If we demand no bits from the vector then we must have demanded
41877       // bits from the implicit zext - simplify to zero.
41878       APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
41879       if (DemandedVecBits == 0)
41880         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
41881 
41882       APInt KnownUndef, KnownZero;
41883       APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
41884       if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
41885                                      KnownZero, TLO, Depth + 1))
41886         return true;
41887 
41888       KnownBits KnownVec;
41889       if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
41890                                KnownVec, TLO, Depth + 1))
41891         return true;
41892 
41893       if (SDValue V = SimplifyMultipleUseDemandedBits(
41894               Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
41895         return TLO.CombineTo(
41896             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
41897 
41898       Known = KnownVec.zext(BitWidth);
41899       return false;
41900     }
41901     break;
41902   }
41903   case X86ISD::PINSRB:
41904   case X86ISD::PINSRW: {
41905     SDValue Vec = Op.getOperand(0);
41906     SDValue Scl = Op.getOperand(1);
41907     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
41908     MVT VecVT = Vec.getSimpleValueType();
41909 
41910     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
41911       unsigned Idx = CIdx->getZExtValue();
41912       if (!OriginalDemandedElts[Idx])
41913         return TLO.CombineTo(Op, Vec);
41914 
41915       KnownBits KnownVec;
41916       APInt DemandedVecElts(OriginalDemandedElts);
41917       DemandedVecElts.clearBit(Idx);
41918       if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
41919                                KnownVec, TLO, Depth + 1))
41920         return true;
41921 
41922       KnownBits KnownScl;
41923       unsigned NumSclBits = Scl.getScalarValueSizeInBits();
41924       APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
41925       if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
41926         return true;
41927 
41928       KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
41929       Known = KnownVec.intersectWith(KnownScl);
41930       return false;
41931     }
41932     break;
41933   }
41934   case X86ISD::PACKSS:
41935     // PACKSS saturates to MIN/MAX integer values, preserving each sign bit. So
41936     // if we only want the sign bit, just ask for the source operands' sign bits.
41937     // TODO - add known bits handling.
41938     if (OriginalDemandedBits.isSignMask()) {
41939       APInt DemandedLHS, DemandedRHS;
41940       getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
41941 
41942       KnownBits KnownLHS, KnownRHS;
41943       APInt SignMask = APInt::getSignMask(BitWidth * 2);
41944       if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
41945                                KnownLHS, TLO, Depth + 1))
41946         return true;
41947       if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
41948                                KnownRHS, TLO, Depth + 1))
41949         return true;
41950 
41951       // Attempt to avoid multi-use ops if we don't need anything from them.
41952       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
41953           Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
41954       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
41955           Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
41956       if (DemandedOp0 || DemandedOp1) {
41957         SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
41958         SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
41959         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
41960       }
41961     }
41962     // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
41963     break;
41964   case X86ISD::VBROADCAST: {
41965     SDValue Src = Op.getOperand(0);
41966     MVT SrcVT = Src.getSimpleValueType();
41967     APInt DemandedElts = APInt::getOneBitSet(
41968         SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
41969     if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
41970                              TLO, Depth + 1))
41971       return true;
41972     // If we don't need the upper bits, attempt to narrow the broadcast source.
41973     // Don't attempt this on AVX512 as it might affect broadcast folding.
41974     // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
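          // e.g. (v2i64 VBROADCAST (i64 X)) with only the low 32 bits of each element
          // demanded becomes (bitcast (v4i32 VBROADCAST (i32 (trunc X)))).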
41975     if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
41976         OriginalDemandedBits.countl_zero() >= (BitWidth / 2) &&
41977         Src->hasOneUse()) {
41978       MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
41979       SDValue NewSrc =
41980           TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
41981       MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
41982       SDValue NewBcst =
41983           TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
41984       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
41985     }
41986     break;
41987   }
41988   case X86ISD::PCMPGT:
41989     // icmp sgt(0, R) == ashr(R, BitWidth-1).
41990     // iff we only need the sign bit then we can use R directly.
41991     if (OriginalDemandedBits.isSignMask() &&
41992         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
41993       return TLO.CombineTo(Op, Op.getOperand(1));
41994     break;
41995   case X86ISD::MOVMSK: {
41996     SDValue Src = Op.getOperand(0);
41997     MVT SrcVT = Src.getSimpleValueType();
41998     unsigned SrcBits = SrcVT.getScalarSizeInBits();
41999     unsigned NumElts = SrcVT.getVectorNumElements();
42000 
42001     // If we don't need the sign bits at all just return zero.
42002     if (OriginalDemandedBits.countr_zero() >= NumElts)
42003       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42004 
42005     // See if we only demand bits from the lower 128-bit vector.
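          // e.g. a v8f32 MOVMSK where only result bits 0-3 are demanded can MOVMSK
          // just the lower v4f32 half instead.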
42006     if (SrcVT.is256BitVector() &&
42007         OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
42008       SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
42009       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42010     }
42011 
42012     // Only demand the vector elements of the sign bits we need.
42013     APInt KnownUndef, KnownZero;
42014     APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
42015     if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
42016                                    TLO, Depth + 1))
42017       return true;
42018 
42019     Known.Zero = KnownZero.zext(BitWidth);
42020     Known.Zero.setHighBits(BitWidth - NumElts);
42021 
42022     // MOVMSK only uses the MSB from each vector element.
42023     KnownBits KnownSrc;
42024     APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
42025     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
42026                              Depth + 1))
42027       return true;
42028 
42029     if (KnownSrc.One[SrcBits - 1])
42030       Known.One.setLowBits(NumElts);
42031     else if (KnownSrc.Zero[SrcBits - 1])
42032       Known.Zero.setLowBits(NumElts);
42033 
42034     // Attempt to avoid multi-use ops if we don't need anything from them.
42035     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
42036             Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
42037       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42038     return false;
42039   }
42040   case X86ISD::TESTP: {
42041     SDValue Op0 = Op.getOperand(0);
42042     SDValue Op1 = Op.getOperand(1);
42043     MVT OpVT = Op0.getSimpleValueType();
42044     assert((OpVT.getVectorElementType() == MVT::f32 ||
42045             OpVT.getVectorElementType() == MVT::f64) &&
42046            "Illegal vector type for X86ISD::TESTP");
42047 
42048     // TESTPS/TESTPD only demand the sign bits of ALL the elements.
42049     KnownBits KnownSrc;
42050     APInt SignMask = APInt::getSignMask(OpVT.getScalarSizeInBits());
42051     bool AssumeSingleUse = (Op0 == Op1) && Op->isOnlyUserOf(Op0.getNode());
42052     return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1,
42053                                 AssumeSingleUse) ||
42054            SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1,
42055                                 AssumeSingleUse);
42056   }
42057   case X86ISD::BEXTR:
42058   case X86ISD::BEXTRI: {
42059     SDValue Op0 = Op.getOperand(0);
42060     SDValue Op1 = Op.getOperand(1);
42061 
42062     // Only bottom 16-bits of the control bits are required.
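          // (Control bits [7:0] hold the start index and bits [15:8] the length;
          // BEXTR ignores any higher control bits.)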
42063     if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42064       // NOTE: SimplifyDemandedBits won't do this for constants.
42065       uint64_t Val1 = Cst1->getZExtValue();
42066       uint64_t MaskedVal1 = Val1 & 0xFFFF;
42067       if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
42068         SDLoc DL(Op);
42069         return TLO.CombineTo(
42070             Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
42071                                 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
42072       }
42073 
42074       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
42075       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
42076 
42077       // If the length is 0, the result is 0.
42078       if (Length == 0) {
42079         Known.setAllZero();
42080         return false;
42081       }
42082 
42083       if ((Shift + Length) <= BitWidth) {
42084         APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
42085         if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
42086           return true;
42087 
42088         Known = Known.extractBits(Length, Shift);
42089         Known = Known.zextOrTrunc(BitWidth);
42090         return false;
42091       }
42092     } else {
42093       assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
42094       KnownBits Known1;
42095       APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
42096       if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
42097         return true;
42098 
42099       // If the length is 0, replace with 0.
42100       KnownBits LengthBits = Known1.extractBits(8, 8);
42101       if (LengthBits.isZero())
42102         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42103     }
42104 
42105     break;
42106   }
42107   case X86ISD::PDEP: {
42108     SDValue Op0 = Op.getOperand(0);
42109     SDValue Op1 = Op.getOperand(1);
42110 
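          // PDEP deposits the contiguous low bits of Op0 at the positions of the set
          // bits in the mask operand Op1; all other result bits are zero.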
42111     unsigned DemandedBitsLZ = OriginalDemandedBits.countl_zero();
42112     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
42113 
42114     // If the demanded bits have leading zeroes, we don't demand those from the
42115     // mask.
42116     if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
42117       return true;
42118 
42119     // The number of possible 1s in the mask determines the number of LSBs of
42120     // operand 0 used. Undemanded bits from the mask don't matter so filter
42121     // them before counting.
42122     KnownBits Known2;
42123     uint64_t Count = (~Known.Zero & LoMask).popcount();
42124     APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
42125     if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
42126       return true;
42127 
42128     // Zeroes are retained from the mask, but not ones.
42129     Known.One.clearAllBits();
42130     // The result will have at least as many trailing zeros as the non-mask
42131     // operand since bits can only map to the same or higher bit position.
42132     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
42133     return false;
42134   }
42135   }
42136 
42137   return TargetLowering::SimplifyDemandedBitsForTargetNode(
42138       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
42139 }
42140 
42141 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42142     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
42143     SelectionDAG &DAG, unsigned Depth) const {
42144   int NumElts = DemandedElts.getBitWidth();
42145   unsigned Opc = Op.getOpcode();
42146   EVT VT = Op.getValueType();
42147 
42148   switch (Opc) {
42149   case X86ISD::PINSRB:
42150   case X86ISD::PINSRW: {
42151     // If we don't demand the inserted element, return the base vector.
42152     SDValue Vec = Op.getOperand(0);
42153     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42154     MVT VecVT = Vec.getSimpleValueType();
42155     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
42156         !DemandedElts[CIdx->getZExtValue()])
42157       return Vec;
42158     break;
42159   }
42160   case X86ISD::VSHLI: {
42161     // If we are only demanding sign bits then we can use the shift source
42162     // directly.
42163     SDValue Op0 = Op.getOperand(0);
42164     unsigned ShAmt = Op.getConstantOperandVal(1);
42165     unsigned BitWidth = DemandedBits.getBitWidth();
42166     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
42167     unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero();
42168     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
42169       return Op0;
42170     break;
42171   }
42172   case X86ISD::VSRAI:
42173     // iff we only need the sign bit then we can use the source directly.
42174     // TODO: generalize where we only demand extended signbits.
42175     if (DemandedBits.isSignMask())
42176       return Op.getOperand(0);
42177     break;
42178   case X86ISD::PCMPGT:
42179     // icmp sgt(0, R) == ashr(R, BitWidth-1).
42180     // iff we only need the sign bit then we can use R directly.
42181     if (DemandedBits.isSignMask() &&
42182         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
42183       return Op.getOperand(1);
42184     break;
42185   case X86ISD::BLENDV: {
42186     // BLENDV: Cond (MSB) ? LHS : RHS
42187     SDValue Cond = Op.getOperand(0);
42188     SDValue LHS = Op.getOperand(1);
42189     SDValue RHS = Op.getOperand(2);
42190 
42191     KnownBits CondKnown = DAG.computeKnownBits(Cond, DemandedElts, Depth + 1);
42192     if (CondKnown.isNegative())
42193       return LHS;
42194     if (CondKnown.isNonNegative())
42195       return RHS;
42196     break;
42197   }
42198   case X86ISD::ANDNP: {
42199     // ANDNP = (~LHS & RHS);
42200     SDValue LHS = Op.getOperand(0);
42201     SDValue RHS = Op.getOperand(1);
42202 
42203     KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
42204     KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
42205 
42206     // If all of the demanded bits are known 0 on LHS and known 0 on RHS, then
42207     // the (inverted) LHS bits cannot contribute to the result of the 'andn' in
42208     // this context, so return RHS.
42209     if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
42210       return RHS;
42211     break;
42212   }
42213   }
42214 
42215   APInt ShuffleUndef, ShuffleZero;
42216   SmallVector<int, 16> ShuffleMask;
42217   SmallVector<SDValue, 2> ShuffleOps;
42218   if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
42219                              ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
42220     // If all the demanded elts are from one operand and are inline,
42221     // then we can use the operand directly.
42222     int NumOps = ShuffleOps.size();
42223     if (ShuffleMask.size() == (unsigned)NumElts &&
42224         llvm::all_of(ShuffleOps, [VT](SDValue V) {
42225           return VT.getSizeInBits() == V.getValueSizeInBits();
42226         })) {
42227 
42228       if (DemandedElts.isSubsetOf(ShuffleUndef))
42229         return DAG.getUNDEF(VT);
42230       if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
42231         return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
42232 
42233       // Bitmask that indicates which ops have only been accessed 'inline'.
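            // An op stays in this mask only while every demanded, non-undef element i
            // maps back to element i of that same op.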
42234       APInt IdentityOp = APInt::getAllOnes(NumOps);
42235       for (int i = 0; i != NumElts; ++i) {
42236         int M = ShuffleMask[i];
42237         if (!DemandedElts[i] || ShuffleUndef[i])
42238           continue;
42239         int OpIdx = M / NumElts;
42240         int EltIdx = M % NumElts;
42241         if (M < 0 || EltIdx != i) {
42242           IdentityOp.clearAllBits();
42243           break;
42244         }
42245         IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
42246         if (IdentityOp == 0)
42247           break;
42248       }
42249       assert((IdentityOp == 0 || IdentityOp.popcount() == 1) &&
42250              "Multiple identity shuffles detected");
42251 
42252       if (IdentityOp != 0)
42253         return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countr_zero()]);
42254     }
42255   }
42256 
42257   return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42258       Op, DemandedBits, DemandedElts, DAG, Depth);
42259 }
42260 
42261 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42262     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42263     bool PoisonOnly, unsigned Depth) const {
42264   unsigned EltsBits = Op.getScalarValueSizeInBits();
42265   unsigned NumElts = DemandedElts.getBitWidth();
42266 
42267   // TODO: Add more target shuffles.
42268   switch (Op.getOpcode()) {
42269   case X86ISD::PSHUFD:
42270   case X86ISD::VPERMILPI: {
42271     SmallVector<int, 8> Mask;
42272     DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
42273 
42274     APInt DemandedSrcElts = APInt::getZero(NumElts);
42275     for (unsigned I = 0; I != NumElts; ++I)
42276       if (DemandedElts[I])
42277         DemandedSrcElts.setBit(Mask[I]);
42278 
42279     return DAG.isGuaranteedNotToBeUndefOrPoison(
42280         Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
42281   }
42282   }
42283   return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42284       Op, DemandedElts, DAG, PoisonOnly, Depth);
42285 }
42286 
42287 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
42288     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42289     bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
42290 
42291   // TODO: Add more target shuffles.
42292   switch (Op.getOpcode()) {
42293   case X86ISD::PSHUFD:
42294   case X86ISD::VPERMILPI:
42295     return false;
42296   }
42297   return TargetLowering::canCreateUndefOrPoisonForTargetNode(
42298       Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
42299 }
42300 
42301 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
42302                                                   const APInt &DemandedElts,
42303                                                   APInt &UndefElts,
42304                                                   const SelectionDAG &DAG,
42305                                                   unsigned Depth) const {
42306   unsigned NumElts = DemandedElts.getBitWidth();
42307   unsigned Opc = Op.getOpcode();
42308 
42309   switch (Opc) {
42310   case X86ISD::VBROADCAST:
42311   case X86ISD::VBROADCAST_LOAD:
42312     UndefElts = APInt::getZero(NumElts);
42313     return true;
42314   }
42315 
42316   return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
42317                                                    DAG, Depth);
42318 }
42319 
42320 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
42321 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
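      // e.g. for (v8i1 setcc (v8i32 A), (v8i32 B), cc) the compared operands are
      // 256 bits wide, so checkBitcastSrcVectorSize(Src, 256, ...) returns true.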
42322 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
42323                                       bool AllowTruncate) {
42324   switch (Src.getOpcode()) {
42325   case ISD::TRUNCATE:
42326     if (!AllowTruncate)
42327       return false;
42328     [[fallthrough]];
42329   case ISD::SETCC:
42330     return Src.getOperand(0).getValueSizeInBits() == Size;
42331   case ISD::AND:
42332   case ISD::XOR:
42333   case ISD::OR:
42334     return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
42335            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
42336   case ISD::SELECT:
42337   case ISD::VSELECT:
42338     return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
42339            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
42340            checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
42341   case ISD::BUILD_VECTOR:
42342     return ISD::isBuildVectorAllZeros(Src.getNode()) ||
42343            ISD::isBuildVectorAllOnes(Src.getNode());
42344   }
42345   return false;
42346 }
42347 
42348 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
42349 static unsigned getAltBitOpcode(unsigned Opcode) {
42350   switch(Opcode) {
42351   case ISD::AND: return X86ISD::FAND;
42352   case ISD::OR: return X86ISD::FOR;
42353   case ISD::XOR: return X86ISD::FXOR;
42354   case X86ISD::ANDNP: return X86ISD::FANDN;
42355   }
42356   llvm_unreachable("Unknown bitwise opcode");
42357 }
42358 
42359 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
42360 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
42361                                           const SDLoc &DL) {
42362   EVT SrcVT = Src.getValueType();
42363   if (SrcVT != MVT::v4i1)
42364     return SDValue();
42365 
42366   switch (Src.getOpcode()) {
42367   case ISD::SETCC:
42368     if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
42369         ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
42370         cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
42371       SDValue Op0 = Src.getOperand(0);
42372       if (ISD::isNormalLoad(Op0.getNode()))
42373         return DAG.getBitcast(MVT::v4f32, Op0);
42374       if (Op0.getOpcode() == ISD::BITCAST &&
42375           Op0.getOperand(0).getValueType() == MVT::v4f32)
42376         return Op0.getOperand(0);
42377     }
42378     break;
42379   case ISD::AND:
42380   case ISD::XOR:
42381   case ISD::OR: {
42382     SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
42383     SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
42384     if (Op0 && Op1)
42385       return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
42386                          Op1);
42387     break;
42388   }
42389   }
42390   return SDValue();
42391 }
42392 
42393 // Helper to push sign extension of vXi1 SETCC result through bitops.
42394 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
42395                                           SDValue Src, const SDLoc &DL) {
42396   switch (Src.getOpcode()) {
42397   case ISD::SETCC:
42398   case ISD::TRUNCATE:
42399   case ISD::BUILD_VECTOR:
42400     return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42401   case ISD::AND:
42402   case ISD::XOR:
42403   case ISD::OR:
42404     return DAG.getNode(
42405         Src.getOpcode(), DL, SExtVT,
42406         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
42407         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
42408   case ISD::SELECT:
42409   case ISD::VSELECT:
42410     return DAG.getSelect(
42411         DL, SExtVT, Src.getOperand(0),
42412         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
42413         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
42414   }
42415   llvm_unreachable("Unexpected node type for vXi1 sign extension");
42416 }
42417 
42418 // Try to match patterns such as
42419 // (i16 bitcast (v16i1 x))
42420 // ->
42421 // (i16 movmsk (16i8 sext (v16i1 x)))
42422 // before the illegal vector is scalarized on subtargets that don't have legal
42423 // vxi1 types.
42424 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
42425                                   const SDLoc &DL,
42426                                   const X86Subtarget &Subtarget) {
42427   EVT SrcVT = Src.getValueType();
42428   if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
42429     return SDValue();
42430 
42431   // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
42432   // legalization destroys the v4i32 type.
42433   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
42434     if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
42435       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
42436                       DAG.getBitcast(MVT::v4f32, V));
42437       return DAG.getZExtOrTrunc(V, DL, VT);
42438     }
42439   }
42440 
42441   // If the input is a truncate from v16i8, v32i8 or v64i8, go ahead and use a
42442   // movmskb even with avx512. This will be better than truncating to vXi1 and
42443   // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
42444   // vpcmpeqb/vpcmpgtb.
42445   bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
42446                       (Src.getOperand(0).getValueType() == MVT::v16i8 ||
42447                        Src.getOperand(0).getValueType() == MVT::v32i8 ||
42448                        Src.getOperand(0).getValueType() == MVT::v64i8);
42449 
42450   // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
42451   // directly with vpmovmskb/vmovmskps/vmovmskpd.
42452   if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
42453       cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
42454       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
42455     EVT CmpVT = Src.getOperand(0).getValueType();
42456     EVT EltVT = CmpVT.getVectorElementType();
42457     if (CmpVT.getSizeInBits() <= 256 &&
42458         (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
42459       PreferMovMsk = true;
42460   }
42461 
42462   // With AVX512 vxi1 types are legal and we prefer using k-regs.
42463   // MOVMSK is supported in SSE2 or later.
42464   if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
42465     return SDValue();
42466 
42467   // If the upper ops of a concatenation are undef, then try to bitcast the
42468   // lower op and extend.
42469   SmallVector<SDValue, 4> SubSrcOps;
42470   if (collectConcatOps(Src.getNode(), SubSrcOps, DAG) &&
42471       SubSrcOps.size() >= 2) {
42472     SDValue LowerOp = SubSrcOps[0];
42473     ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
42474     if (LowerOp.getOpcode() == ISD::SETCC &&
42475         all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
42476       EVT SubVT = VT.getIntegerVT(
42477           *DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
42478       if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
42479         EVT IntVT = VT.getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
42480         return DAG.getBitcast(VT, DAG.getNode(ISD::ANY_EXTEND, DL, IntVT, V));
42481       }
42482     }
42483   }
42484 
42485   // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
42486   // v4f64. So all legal 128-bit and 256-bit vectors are covered except for
42487   // v8i16 and v16i16.
42488   // For these two cases, we can shuffle the upper element bytes to a
42489   // consecutive sequence at the start of the vector and treat the results as
42490   // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
42491   // for v16i16 this is not the case, because the shuffle is expensive, so we
42492   // avoid sign-extending to this type entirely.
42493   // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
42494   // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
42495   MVT SExtVT;
42496   bool PropagateSExt = false;
42497   switch (SrcVT.getSimpleVT().SimpleTy) {
42498   default:
42499     return SDValue();
42500   case MVT::v2i1:
42501     SExtVT = MVT::v2i64;
42502     break;
42503   case MVT::v4i1:
42504     SExtVT = MVT::v4i32;
42505     // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
42506     // sign-extend to a 256-bit operation to avoid truncation.
42507     if (Subtarget.hasAVX() &&
42508         checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
42509       SExtVT = MVT::v4i64;
42510       PropagateSExt = true;
42511     }
42512     break;
42513   case MVT::v8i1:
42514     SExtVT = MVT::v8i16;
42515     // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
42516     // sign-extend to a 256-bit operation to match the compare.
42517     // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
42518     // 256-bit because the shuffle is cheaper than sign extending the result of
42519     // the compare.
42520     if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
42521                                checkBitcastSrcVectorSize(Src, 512, true))) {
42522       SExtVT = MVT::v8i32;
42523       PropagateSExt = true;
42524     }
42525     break;
42526   case MVT::v16i1:
42527     SExtVT = MVT::v16i8;
42528     // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
42529     // it is not profitable to sign-extend to 256-bit because this will
42530     // require an extra cross-lane shuffle which is more expensive than
42531     // truncating the result of the compare to 128-bits.
42532     break;
42533   case MVT::v32i1:
42534     SExtVT = MVT::v32i8;
42535     break;
42536   case MVT::v64i1:
42537     // If we have AVX512F but not AVX512BW, and the input is truncated from
42538     // v64i8 (checked earlier), then split the input and make two pmovmskbs.
42539     if (Subtarget.hasAVX512()) {
42540       if (Subtarget.hasBWI())
42541         return SDValue();
42542       SExtVT = MVT::v64i8;
42543       break;
42544     }
42545     // Split if this is a <64 x i8> comparison result.
42546     if (checkBitcastSrcVectorSize(Src, 512, false)) {
42547       SExtVT = MVT::v64i8;
42548       break;
42549     }
42550     return SDValue();
42551   };
42552 
42553   SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
42554                             : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42555 
42556   if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
42557     V = getPMOVMSKB(DL, V, DAG, Subtarget);
42558   } else {
42559     if (SExtVT == MVT::v8i16) {
42560       V = widenSubVector(V, false, Subtarget, DAG, DL, 256);
42561       V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v16i8, V);
42562     }
42563     V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
42564   }
42565 
42566   EVT IntVT =
42567       EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
42568   V = DAG.getZExtOrTrunc(V, DL, IntVT);
42569   return DAG.getBitcast(VT, V);
42570 }
42571 
42572 // Convert a vXi1 constant build vector to the same width scalar integer.
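      // e.g. (v4i1 <1,0,1,1>) becomes (i4 0b1101), with element 0 landing in bit 0.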
42573 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
42574   EVT SrcVT = Op.getValueType();
42575   assert(SrcVT.getVectorElementType() == MVT::i1 &&
42576          "Expected a vXi1 vector");
42577   assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
42578          "Expected a constant build vector");
42579 
42580   APInt Imm(SrcVT.getVectorNumElements(), 0);
42581   for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
42582     SDValue In = Op.getOperand(Idx);
42583     if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
42584       Imm.setBit(Idx);
42585   }
42586   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
42587   return DAG.getConstant(Imm, SDLoc(Op), IntVT);
42588 }
42589 
42590 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
42591                                            TargetLowering::DAGCombinerInfo &DCI,
42592                                            const X86Subtarget &Subtarget) {
42593   assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
42594 
42595   if (!DCI.isBeforeLegalizeOps())
42596     return SDValue();
42597 
42598   // Only do this if we have k-registers.
42599   if (!Subtarget.hasAVX512())
42600     return SDValue();
42601 
42602   EVT DstVT = N->getValueType(0);
42603   SDValue Op = N->getOperand(0);
42604   EVT SrcVT = Op.getValueType();
42605 
42606   if (!Op.hasOneUse())
42607     return SDValue();
42608 
42609   // Look for logic ops.
42610   if (Op.getOpcode() != ISD::AND &&
42611       Op.getOpcode() != ISD::OR &&
42612       Op.getOpcode() != ISD::XOR)
42613     return SDValue();
42614 
42615   // Make sure we have a bitcast between mask registers and a scalar type.
42616   if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42617         DstVT.isScalarInteger()) &&
42618       !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
42619         SrcVT.isScalarInteger()))
42620     return SDValue();
42621 
42622   SDValue LHS = Op.getOperand(0);
42623   SDValue RHS = Op.getOperand(1);
42624 
42625   if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
42626       LHS.getOperand(0).getValueType() == DstVT)
42627     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
42628                        DAG.getBitcast(DstVT, RHS));
42629 
42630   if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
42631       RHS.getOperand(0).getValueType() == DstVT)
42632     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42633                        DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
42634 
42635   // If the RHS is a vXi1 build vector, this is a good reason to flip too.
42636   // Most of these have to move a constant from the scalar domain anyway.
42637   if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
42638     RHS = combinevXi1ConstantToInteger(RHS, DAG);
42639     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42640                        DAG.getBitcast(DstVT, LHS), RHS);
42641   }
42642 
42643   return SDValue();
42644 }
42645 
42646 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
42647                                     const X86Subtarget &Subtarget) {
42648   SDLoc DL(BV);
42649   unsigned NumElts = BV->getNumOperands();
42650   SDValue Splat = BV->getSplatValue();
42651 
42652   // Build MMX element from integer GPR or SSE float values.
42653   auto CreateMMXElement = [&](SDValue V) {
42654     if (V.isUndef())
42655       return DAG.getUNDEF(MVT::x86mmx);
42656     if (V.getValueType().isFloatingPoint()) {
42657       if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
42658         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
42659         V = DAG.getBitcast(MVT::v2i64, V);
42660         return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
42661       }
42662       V = DAG.getBitcast(MVT::i32, V);
42663     } else {
42664       V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
42665     }
42666     return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
42667   };
42668 
42669   // Convert build vector ops to MMX data in the bottom elements.
42670   SmallVector<SDValue, 8> Ops;
42671 
42672   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42673 
42674   // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
42675   if (Splat) {
42676     if (Splat.isUndef())
42677       return DAG.getUNDEF(MVT::x86mmx);
42678 
42679     Splat = CreateMMXElement(Splat);
42680 
42681     if (Subtarget.hasSSE1()) {
42682       // Unpack v8i8 to splat i8 elements to lowest 16-bits.
42683       if (NumElts == 8)
42684         Splat = DAG.getNode(
42685             ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42686             DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
42687                                   TLI.getPointerTy(DAG.getDataLayout())),
42688             Splat, Splat);
42689 
42690       // Use PSHUFW to repeat 16-bit elements.
42691       unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
42692       return DAG.getNode(
42693           ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42694           DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
42695                                 TLI.getPointerTy(DAG.getDataLayout())),
42696           Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
42697     }
42698     Ops.append(NumElts, Splat);
42699   } else {
42700     for (unsigned i = 0; i != NumElts; ++i)
42701       Ops.push_back(CreateMMXElement(BV->getOperand(i)));
42702   }
42703 
42704   // Use tree of PUNPCKLs to build up general MMX vector.
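        // e.g. 8 byte elements are merged as 4 x punpcklbw, then 2 x punpcklwd, then
        // a final punpckldq.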
42705   while (Ops.size() > 1) {
42706     unsigned NumOps = Ops.size();
42707     unsigned IntrinOp =
42708         (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
42709                      : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
42710                                     : Intrinsic::x86_mmx_punpcklbw));
42711     SDValue Intrin = DAG.getTargetConstant(
42712         IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
42713     for (unsigned i = 0; i != NumOps; i += 2)
42714       Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
42715                                Ops[i], Ops[i + 1]);
42716     Ops.resize(NumOps / 2);
42717   }
42718 
42719   return Ops[0];
42720 }
42721 
42722 // Recursive function that attempts to find if a bool vector node was originally
42723 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
42724 // integer. If so, replace the scalar ops with bool vector equivalents back down
42725 // the chain.
42726 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
42727                                           SelectionDAG &DAG,
42728                                           const X86Subtarget &Subtarget) {
42729   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42730   unsigned Opc = V.getOpcode();
42731   switch (Opc) {
42732   case ISD::BITCAST: {
42733     // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
42734     SDValue Src = V.getOperand(0);
42735     EVT SrcVT = Src.getValueType();
42736     if (SrcVT.isVector() || SrcVT.isFloatingPoint())
42737       return DAG.getBitcast(VT, Src);
42738     break;
42739   }
42740   case ISD::TRUNCATE: {
42741     // If we find a suitable source, a truncated scalar becomes a subvector.
42742     SDValue Src = V.getOperand(0);
42743     EVT NewSrcVT =
42744         EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
42745     if (TLI.isTypeLegal(NewSrcVT))
42746       if (SDValue N0 =
42747               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42748         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
42749                            DAG.getIntPtrConstant(0, DL));
42750     break;
42751   }
42752   case ISD::ANY_EXTEND:
42753   case ISD::ZERO_EXTEND: {
42754     // If we find a suitable source, an extended scalar becomes a subvector.
42755     SDValue Src = V.getOperand(0);
42756     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
42757                                     Src.getScalarValueSizeInBits());
42758     if (TLI.isTypeLegal(NewSrcVT))
42759       if (SDValue N0 =
42760               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42761         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
42762                            Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
42763                                                   : DAG.getConstant(0, DL, VT),
42764                            N0, DAG.getIntPtrConstant(0, DL));
42765     break;
42766   }
42767   case ISD::OR: {
42768     // If we find suitable sources, we can just move an OR to the vector domain.
42769     SDValue Src0 = V.getOperand(0);
42770     SDValue Src1 = V.getOperand(1);
42771     if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42772       if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
42773         return DAG.getNode(Opc, DL, VT, N0, N1);
42774     break;
42775   }
42776   case ISD::SHL: {
42777     // If we find a suitable source, a SHL becomes a KSHIFTL.
42778     SDValue Src0 = V.getOperand(0);
42779     if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
42780         ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
42781       break;
42782 
42783     if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
42784       if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42785         return DAG.getNode(
42786             X86ISD::KSHIFTL, DL, VT, N0,
42787             DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
42788     break;
42789   }
42790   }
42791   return SDValue();
42792 }
42793 
42794 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
42795                               TargetLowering::DAGCombinerInfo &DCI,
42796                               const X86Subtarget &Subtarget) {
42797   SDValue N0 = N->getOperand(0);
42798   EVT VT = N->getValueType(0);
42799   EVT SrcVT = N0.getValueType();
42800   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42801 
42802   // Try to match patterns such as
42803   // (i16 bitcast (v16i1 x))
42804   // ->
42805   // (i16 movmsk (16i8 sext (v16i1 x)))
42806   // before the setcc result is scalarized on subtargets that don't have legal
42807   // vxi1 types.
42808   if (DCI.isBeforeLegalize()) {
42809     SDLoc dl(N);
42810     if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
42811       return V;
42812 
42813     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42814     // type, widen both sides to avoid a trip through memory.
42815     if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
42816         Subtarget.hasAVX512()) {
42817       N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
42818       N0 = DAG.getBitcast(MVT::v8i1, N0);
42819       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
42820                          DAG.getIntPtrConstant(0, dl));
42821     }
42822 
42823     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42824     // type, widen both sides to avoid a trip through memory.
42825     if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
42826         Subtarget.hasAVX512()) {
42827       // Use zeros for the widening if we already have some zeroes. This can
42828       // allow SimplifyDemandedBits to remove scalar ANDs that may be
42829       // downstream of this.
42830       // FIXME: It might make sense to detect a concat_vectors with a mix of
42831       // zeroes and undef and turn it into insert_subvector for i1 vectors as
42832       // a separate combine. What we can't do is canonicalize the operands of
42833       // such a concat or we'll get into a loop with SimplifyDemandedBits.
42834       if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
42835         SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
42836         if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
42837           SrcVT = LastOp.getValueType();
42838           unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42839           SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
42840           Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
42841           N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42842           N0 = DAG.getBitcast(MVT::i8, N0);
42843           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42844         }
42845       }
42846 
42847       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42848       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
42849       Ops[0] = N0;
42850       N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42851       N0 = DAG.getBitcast(MVT::i8, N0);
42852       return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42853     }
42854   } else {
42855     // If we're bitcasting from iX to vXi1, see if the integer originally
42856     // began as a vXi1 and whether we can remove the bitcast entirely.
42857     if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
42858         SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
42859       if (SDValue V =
42860               combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
42861         return V;
42862     }
42863   }
42864 
42865   // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
42866   // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
42867   // due to insert_subvector legalization on KNL. By promoting the copy to i16
42868   // we can help with known bits propagation from the vXi1 domain to the
42869   // scalar domain.
42870   if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
42871       !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
42872       N0.getOperand(0).getValueType() == MVT::v16i1 &&
42873       isNullConstant(N0.getOperand(1)))
42874     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
42875                        DAG.getBitcast(MVT::i16, N0.getOperand(0)));
42876 
42877   // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
42878   // and the vbroadcast_load are both integer or both fp. In some cases this
42879   // will remove the bitcast entirely.
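        // For example, (v4f32 (bitcast (v4i32 (vbroadcast_load<i32> addr)))) can
        // be re-typed as (v4f32 (vbroadcast_load<f32> addr)), at which point the
        // trailing bitcast folds away (illustrative sketch of the fold below).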
42880   if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
42881        VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
42882     auto *BCast = cast<MemIntrinsicSDNode>(N0);
42883     unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
42884     unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
42885     // Don't swap i8/i16 since we don't have fp types of that size.
42886     if (MemSize >= 32) {
42887       MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
42888                                        : MVT::getIntegerVT(MemSize);
42889       MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
42890                                         : MVT::getIntegerVT(SrcVTSize);
42891       LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
42892 
42893       SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
42894       SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
42895       SDValue ResNode =
42896           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
42897                                   MemVT, BCast->getMemOperand());
42898       DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
42899       return DAG.getBitcast(VT, ResNode);
42900     }
42901   }
42902 
42903   // Since MMX types are special and don't usually play with other vector types,
42904   // it's better to handle them early to be sure we emit efficient code by
42905   // avoiding store-load conversions.
42906   if (VT == MVT::x86mmx) {
42907     // Detect MMX constant vectors.
42908     APInt UndefElts;
42909     SmallVector<APInt, 1> EltBits;
42910     if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
42911       SDLoc DL(N0);
42912       // Handle zero-extension of i32 with MOVD.
42913       if (EltBits[0].countl_zero() >= 32)
42914         return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
42915                            DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
42916       // Else, bitcast to a double.
42917       // TODO - investigate supporting sext 32-bit immediates on x86_64.
42918       APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
42919       return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
42920     }
42921 
42922     // Detect bitcasts to x86mmx low word.
42923     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
42924         (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
42925         N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
42926       bool LowUndef = true, AllUndefOrZero = true;
42927       for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
42928         SDValue Op = N0.getOperand(i);
42929         LowUndef &= Op.isUndef() || (i >= e/2);
42930         AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
42931       }
42932       if (AllUndefOrZero) {
42933         SDValue N00 = N0.getOperand(0);
42934         SDLoc dl(N00);
42935         N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
42936                        : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
42937         return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
42938       }
42939     }
42940 
42941     // Detect bitcasts of 64-bit build vectors and convert to a
42942     // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
42943     // lowest element.
42944     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
42945         (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
42946          SrcVT == MVT::v8i8))
42947       return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
42948 
42949     // Detect bitcasts between element or subvector extraction to x86mmx.
42950     if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
42951          N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
42952         isNullConstant(N0.getOperand(1))) {
42953       SDValue N00 = N0.getOperand(0);
42954       if (N00.getValueType().is128BitVector())
42955         return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
42956                            DAG.getBitcast(MVT::v2i64, N00));
42957     }
42958 
42959     // Detect bitcasts from FP_TO_SINT to x86mmx.
42960     if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
42961       SDLoc DL(N0);
42962       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
42963                                 DAG.getUNDEF(MVT::v2i32));
42964       return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
42965                          DAG.getBitcast(MVT::v2i64, Res));
42966     }
42967   }
42968 
42969   // Try to remove a bitcast of constant vXi1 vector. We have to legalize
42970   // most of these to scalar anyway.
42971   if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
42972       SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42973       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
42974     return combinevXi1ConstantToInteger(N0, DAG);
42975   }
42976 
42977   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
42978       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42979       isa<ConstantSDNode>(N0)) {
42980     auto *C = cast<ConstantSDNode>(N0);
42981     if (C->isAllOnes())
42982       return DAG.getConstant(1, SDLoc(N0), VT);
42983     if (C->isZero())
42984       return DAG.getConstant(0, SDLoc(N0), VT);
42985   }
42986 
42987   // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
42988   // Turn it into a sign bit compare that produces a k-register. This avoids
42989   // a trip through a GPR.
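        // For example, with AVX512BW this rewrites
        //   (v16i1 (bitcast (i16 (trunc (movmsk (v16i8 X))))))
        // as
        //   (v16i1 (setcc (v16i8 X), zero, setlt))
        // keeping the sign-bit test in a mask register (illustrative sketch).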
42990   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
42991       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42992       isPowerOf2_32(VT.getVectorNumElements())) {
42993     unsigned NumElts = VT.getVectorNumElements();
42994     SDValue Src = N0;
42995 
42996     // Peek through truncate.
42997     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
42998       Src = N0.getOperand(0);
42999 
43000     if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
43001       SDValue MovmskIn = Src.getOperand(0);
43002       MVT MovmskVT = MovmskIn.getSimpleValueType();
43003       unsigned MovMskElts = MovmskVT.getVectorNumElements();
43004 
43005       // We allow extra bits of the movmsk to be used since they are known zero.
43006       // We can't convert a VPMOVMSKB without avx512bw.
43007       if (MovMskElts <= NumElts &&
43008           (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
43009         EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
43010         MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
43011         SDLoc dl(N);
43012         MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
43013         SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
43014                                    DAG.getConstant(0, dl, IntVT), ISD::SETLT);
43015         if (EVT(CmpVT) == VT)
43016           return Cmp;
43017 
43018         // Pad with zeroes up to original VT to replace the zeroes that were
43019         // being used from the MOVMSK.
43020         unsigned NumConcats = NumElts / MovMskElts;
43021         SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
43022         Ops[0] = Cmp;
43023         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
43024       }
43025     }
43026   }
43027 
43028   // Try to remove bitcasts from input and output of mask arithmetic to
43029   // remove GPR<->K-register crossings.
43030   if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
43031     return V;
43032 
43033   // Convert a bitcasted integer logic operation that has one bitcasted
43034   // floating-point operand into a floating-point logic operation. This may
43035   // create a load of a constant, but that is cheaper than materializing the
43036   // constant in an integer register and transferring it to an SSE register or
43037   // transferring the SSE operand to integer register and back.
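        // For instance, (f32 (bitcast (and (i32 (bitcast (f32 X))), C))) can
        // become (f32 (FAND X, (f32 (bitcast C)))), trading a GPR AND for an SSE
        // FAND (rough sketch; the exact opcode depends on the checks below).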
43038   unsigned FPOpcode;
43039   switch (N0.getOpcode()) {
43040     case ISD::AND: FPOpcode = X86ISD::FAND; break;
43041     case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
43042     case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
43043     default: return SDValue();
43044   }
43045 
43046   // Check that the result is a supported FP scalar type or a legal integer vector.
43047   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
43048         (Subtarget.hasSSE2() && VT == MVT::f64) ||
43049         (Subtarget.hasFP16() && VT == MVT::f16) ||
43050         (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
43051          TLI.isTypeLegal(VT))))
43052     return SDValue();
43053 
43054   SDValue LogicOp0 = N0.getOperand(0);
43055   SDValue LogicOp1 = N0.getOperand(1);
43056   SDLoc DL0(N0);
43057 
43058   // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
43059   if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
43060       LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
43061       LogicOp0.getOperand(0).getValueType() == VT &&
43062       !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
43063     SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
43064     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43065     return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
43066   }
43067   // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
43068   if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
43069       LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
43070       LogicOp1.getOperand(0).getValueType() == VT &&
43071       !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
43072     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
43073     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43074     return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
43075   }
43076 
43077   return SDValue();
43078 }
43079 
43080 // (mul (zext a), (sext b))
43081 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
43082                          SDValue &Op1) {
43083   Op0 = Mul.getOperand(0);
43084   Op1 = Mul.getOperand(1);
43085 
43086   // Canonicalize so that the sign-extended operand ends up in Op1.
43087   if (Op0.getOpcode() == ISD::SIGN_EXTEND)
43088     std::swap(Op0, Op1);
43089 
43090   auto IsFreeTruncation = [](SDValue &Op) -> bool {
43091     if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
43092          Op.getOpcode() == ISD::SIGN_EXTEND) &&
43093         Op.getOperand(0).getScalarValueSizeInBits() <= 8)
43094       return true;
43095 
43096     auto *BV = dyn_cast<BuildVectorSDNode>(Op);
43097     return (BV && BV->isConstant());
43098   };
43099 
43100   // (dpbusd (zext a), (sext b)). The first operand must be an unsigned
43101   // value, so check that Op0 is zero extended (at most 8 active bits). Op1
43102   // must be a signed value, so just check that it has at most 8 significant bits.
43103   if ((IsFreeTruncation(Op0) &&
43104        DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
43105       (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
43106     return true;
43107 
43108   return false;
43109 }
43110 
43111 // Given an ABS node, detect the following pattern:
43112 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
43113 // This is useful as it is the input into a SAD pattern.
43114 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
43115   SDValue AbsOp1 = Abs->getOperand(0);
43116   if (AbsOp1.getOpcode() != ISD::SUB)
43117     return false;
43118 
43119   Op0 = AbsOp1.getOperand(0);
43120   Op1 = AbsOp1.getOperand(1);
43121 
43122   // Check if the operands of the sub are zero-extended from vectors of i8.
43123   if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
43124       Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
43125       Op1.getOpcode() != ISD::ZERO_EXTEND ||
43126       Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
43127     return false;
43128 
43129   return true;
43130 }
43131 
43132 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
43133                               unsigned &LogBias, const SDLoc &DL,
43134                               const X86Subtarget &Subtarget) {
43135   // Extend or truncate to MVT::i8 first.
43136   MVT Vi8VT =
43137       MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
43138   LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
43139   RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
43140 
43141   // VPDPBUSD(<16 x i32>C, <16 x i8>A, <16 x i8>B). For each dst element:
43142   // C[0] = C[0] + A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3].
43143   // The src A, B element type is i8, but the dst C element type is i32.
43144   // The reduction stage count is computed from the vXi8 src type, so we need
43145   // LogBias = 2 to skip the two stages that VPDPBUSD already performs itself.
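        // For example, with a v16i8 source a plain add reduction would need
        // log2(16) = 4 shuffle+add stages, but VPDPBUSD already folds groups of
        // four products into each i32 lane, leaving log2(16) - 2 = 2 stages.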
43146   LogBias = 2;
43147 
43148   unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
43149   if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
43150     RegSize = std::max(512u, RegSize);
43151 
43152   // "Zero-extend" the i8 vectors. This is not a per-element zext; rather, we
43153   // fill in the missing vector elements with 0.
43154   unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
43155   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
43156   Ops[0] = LHS;
43157   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43158   SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43159   Ops[0] = RHS;
43160   SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43161 
43162   // Actually build the DotProduct, split as 256/512 bits for
43163   // AVXVNNI/AVX512VNNI.
43164   auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43165                        ArrayRef<SDValue> Ops) {
43166     MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43167     return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
43168   };
43169   MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
43170   SDValue Zero = DAG.getConstant(0, DL, DpVT);
43171 
43172   return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
43173                           DpBuilder, false);
43174 }
43175 
43176 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
43177 // to these zexts.
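      // Note that PSADBW itself reduces: each 64-bit lane of its result holds the
      // sum of absolute differences of the corresponding eight byte pairs, which
      // is why the SAD-based reductions below need fewer shuffle+add stages.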
43178 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
43179                             const SDValue &Zext1, const SDLoc &DL,
43180                             const X86Subtarget &Subtarget) {
43181   // Find the appropriate width for the PSADBW.
43182   EVT InVT = Zext0.getOperand(0).getValueType();
43183   unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
43184 
43185   // "Zero-extend" the i8 vectors. This is not a per-element zext; rather, we
43186   // fill in the missing vector elements with 0.
43187   unsigned NumConcat = RegSize / InVT.getSizeInBits();
43188   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
43189   Ops[0] = Zext0.getOperand(0);
43190   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43191   SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43192   Ops[0] = Zext1.getOperand(0);
43193   SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43194 
43195   // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
43196   auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43197                           ArrayRef<SDValue> Ops) {
43198     MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
43199     return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
43200   };
43201   MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
43202   return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
43203                           PSADBWBuilder);
43204 }
43205 
43206 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
43207 // PHMINPOSUW.
43208 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
43209                                       const X86Subtarget &Subtarget) {
43210   // Bail without SSE41.
43211   if (!Subtarget.hasSSE41())
43212     return SDValue();
43213 
43214   EVT ExtractVT = Extract->getValueType(0);
43215   if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
43216     return SDValue();
43217 
43218   // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
43219   ISD::NodeType BinOp;
43220   SDValue Src = DAG.matchBinOpReduction(
43221       Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
43222   if (!Src)
43223     return SDValue();
43224 
43225   EVT SrcVT = Src.getValueType();
43226   EVT SrcSVT = SrcVT.getScalarType();
43227   if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
43228     return SDValue();
43229 
43230   SDLoc DL(Extract);
43231   SDValue MinPos = Src;
43232 
43233   // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
43234   while (SrcVT.getSizeInBits() > 128) {
43235     SDValue Lo, Hi;
43236     std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
43237     SrcVT = Lo.getValueType();
43238     MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
43239   }
43240   assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
43241           (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
43242          "Unexpected value type");
43243 
43244   // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
43245   // to flip the values accordingly.
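        // The XOR constants below map each case onto an unsigned-min problem,
        // e.g. for SMAX: x ^ 0x7FFF reverses the signed order when compared as
        // unsigned, so UMIN(x ^ 0x7FFF) == SMAX(x) ^ 0x7FFF and XORing the result
        // again recovers the signed maximum (0x8000 for SMIN, ~0 for UMAX).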
43246   SDValue Mask;
43247   unsigned MaskEltsBits = ExtractVT.getSizeInBits();
43248   if (BinOp == ISD::SMAX)
43249     Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
43250   else if (BinOp == ISD::SMIN)
43251     Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
43252   else if (BinOp == ISD::UMAX)
43253     Mask = DAG.getAllOnesConstant(DL, SrcVT);
43254 
43255   if (Mask)
43256     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43257 
43258   // For v16i8 cases we need to perform UMIN on pairs of byte elements,
43259   // shuffling each upper element down and inserting zeros. This means that the
43260   // v16i8 UMIN will leave each upper element as zero, performing zero-extension
43261   // ready for the PHMINPOS.
43262   if (ExtractVT == MVT::i8) {
43263     SDValue Upper = DAG.getVectorShuffle(
43264         SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
43265         {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
43266     MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
43267   }
43268 
43269   // Perform the PHMINPOS on a v8i16 vector.
43270   MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
43271   MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
43272   MinPos = DAG.getBitcast(SrcVT, MinPos);
43273 
43274   if (Mask)
43275     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43276 
43277   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
43278                      DAG.getIntPtrConstant(0, DL));
43279 }
43280 
43281 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
43282 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
43283                                          const X86Subtarget &Subtarget) {
43284   // Bail without SSE2.
43285   if (!Subtarget.hasSSE2())
43286     return SDValue();
43287 
43288   EVT ExtractVT = Extract->getValueType(0);
43289   unsigned BitWidth = ExtractVT.getSizeInBits();
43290   if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
43291       ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
43292     return SDValue();
43293 
43294   // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
43295   ISD::NodeType BinOp;
43296   SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
43297   if (!Match && ExtractVT == MVT::i1)
43298     Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
43299   if (!Match)
43300     return SDValue();
43301 
43302   // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
43303   // which we can't support here for now.
43304   if (Match.getScalarValueSizeInBits() != BitWidth)
43305     return SDValue();
43306 
43307   SDValue Movmsk;
43308   SDLoc DL(Extract);
43309   EVT MatchVT = Match.getValueType();
43310   unsigned NumElts = MatchVT.getVectorNumElements();
43311   unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
43312   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43313   LLVMContext &Ctx = *DAG.getContext();
43314 
43315   if (ExtractVT == MVT::i1) {
43316     // Special case for (pre-legalization) vXi1 reductions.
43317     if (NumElts > 64 || !isPowerOf2_32(NumElts))
43318       return SDValue();
43319     if (Match.getOpcode() == ISD::SETCC) {
43320       ISD::CondCode CC = cast<CondCodeSDNode>(Match.getOperand(2))->get();
43321       if ((BinOp == ISD::AND && CC == ISD::CondCode::SETEQ) ||
43322           (BinOp == ISD::OR && CC == ISD::CondCode::SETNE)) {
43323         // For all_of(setcc(x,y,eq)) - use (iX)x == (iX)y.
43324         // For any_of(setcc(x,y,ne)) - use (iX)x != (iX)y.
43325         X86::CondCode X86CC;
43326         SDValue LHS = DAG.getFreeze(Match.getOperand(0));
43327         SDValue RHS = DAG.getFreeze(Match.getOperand(1));
43328         APInt Mask = APInt::getAllOnes(LHS.getScalarValueSizeInBits());
43329         if (SDValue V = LowerVectorAllEqual(DL, LHS, RHS, CC, Mask, Subtarget,
43330                                             DAG, X86CC))
43331           return DAG.getNode(ISD::TRUNCATE, DL, ExtractVT,
43332                              getSETCC(X86CC, V, DL, DAG));
43333       }
43334     }
43335     if (TLI.isTypeLegal(MatchVT)) {
43336       // If this is a legal AVX512 predicate type then we can just bitcast.
43337       EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43338       Movmsk = DAG.getBitcast(MovmskVT, Match);
43339     } else {
43340       // Use combineBitcastvxi1 to create the MOVMSK.
43341       while (NumElts > MaxElts) {
43342         SDValue Lo, Hi;
43343         std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43344         Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43345         NumElts /= 2;
43346       }
43347       EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43348       Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
43349     }
43350     if (!Movmsk)
43351       return SDValue();
43352     Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
43353   } else {
43354     // FIXME: Better handling of k-registers or 512-bit vectors?
43355     unsigned MatchSizeInBits = Match.getValueSizeInBits();
43356     if (!(MatchSizeInBits == 128 ||
43357           (MatchSizeInBits == 256 && Subtarget.hasAVX())))
43358       return SDValue();
43359 
43360     // Make sure this isn't a vector of 1 element. The perf win from using
43361     // MOVMSK diminishes with fewer elements in the reduction, but it is
43362     // generally better to get the comparison over to the GPRs as soon as
43363     // possible to reduce the number of vector ops.
43364     if (Match.getValueType().getVectorNumElements() < 2)
43365       return SDValue();
43366 
43367     // Check that we are extracting a reduction of all sign bits.
43368     if (DAG.ComputeNumSignBits(Match) != BitWidth)
43369       return SDValue();
43370 
43371     if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
43372       SDValue Lo, Hi;
43373       std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43374       Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43375       MatchSizeInBits = Match.getValueSizeInBits();
43376     }
43377 
43378     // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
43379     MVT MaskSrcVT;
43380     if (64 == BitWidth || 32 == BitWidth)
43381       MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
43382                                    MatchSizeInBits / BitWidth);
43383     else
43384       MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
43385 
43386     SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
43387     Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
43388     NumElts = MaskSrcVT.getVectorNumElements();
43389   }
43390   assert((NumElts <= 32 || NumElts == 64) &&
43391          "Not expecting more than 64 elements");
43392 
43393   MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
43394   if (BinOp == ISD::XOR) {
43395     // parity -> (PARITY(MOVMSK X))
43396     SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
43397     return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
43398   }
43399 
43400   SDValue CmpC;
43401   ISD::CondCode CondCode;
43402   if (BinOp == ISD::OR) {
43403     // any_of -> MOVMSK != 0
43404     CmpC = DAG.getConstant(0, DL, CmpVT);
43405     CondCode = ISD::CondCode::SETNE;
43406   } else {
43407     // all_of -> MOVMSK == ((1 << NumElts) - 1)
43408     CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
43409                            DL, CmpVT);
43410     CondCode = ISD::CondCode::SETEQ;
43411   }
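        // E.g. an all_of over the sign bits of a v4f32 compare uses MOVMSKPS and
        // tests the 4-bit mask against 0xF, while any_of just tests it against 0.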
43412 
43413   // The setcc produces an i8 of 0/1, so extend that to the result width and
43414   // negate to get the final 0/-1 mask value.
43415   EVT SetccVT = TLI.getSetCCResultType(DAG.getDataLayout(), Ctx, CmpVT);
43416   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
43417   SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
43418   SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
43419   return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
43420 }
43421 
43422 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
43423                                       const X86Subtarget &Subtarget) {
43424   if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
43425     return SDValue();
43426 
43427   EVT ExtractVT = Extract->getValueType(0);
43428   // Verify the type we're extracting is i32, as the output element type of
43429   // vpdpbusd is i32.
43430   if (ExtractVT != MVT::i32)
43431     return SDValue();
43432 
43433   EVT VT = Extract->getOperand(0).getValueType();
43434   if (!isPowerOf2_32(VT.getVectorNumElements()))
43435     return SDValue();
43436 
43437   // Match shuffle + add pyramid.
43438   ISD::NodeType BinOp;
43439   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43440 
43441   // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
43442   // done by vpdpbusd computes a signed 16-bit product that will be sign extended
43443   // before adding into the accumulator.
43444   // TODO:
43445   // We also need to verify that the multiply has at least 2x the number of bits
43446   // of the input. We shouldn't match
43447   // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y)))).
43448   // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
43449   //   Root = Root.getOperand(0);
43450 
43451   // If there was a match, we want Root to be a mul.
43452   if (!Root || Root.getOpcode() != ISD::MUL)
43453     return SDValue();
43454 
43455   // Check whether we have an extend and mul pattern.
43456   SDValue LHS, RHS;
43457   if (!detectExtMul(DAG, Root, LHS, RHS))
43458     return SDValue();
43459 
43460   // Create the dot product instruction.
43461   SDLoc DL(Extract);
43462   unsigned StageBias;
43463   SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
43464 
43465   // If the original vector was wider than 4 elements, sum over the results
43466   // in the DP vector.
43467   unsigned Stages = Log2_32(VT.getVectorNumElements());
43468   EVT DpVT = DP.getValueType();
43469 
43470   if (Stages > StageBias) {
43471     unsigned DpElems = DpVT.getVectorNumElements();
43472 
43473     for (unsigned i = Stages - StageBias; i > 0; --i) {
43474       SmallVector<int, 16> Mask(DpElems, -1);
43475       for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43476         Mask[j] = MaskEnd + j;
43477 
43478       SDValue Shuffle =
43479           DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
43480       DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
43481     }
43482   }
43483 
43484   // Return the lowest ExtractSizeInBits bits.
43485   EVT ResVT =
43486       EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43487                        DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
43488   DP = DAG.getBitcast(ResVT, DP);
43489   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
43490                      Extract->getOperand(1));
43491 }
43492 
43493 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
43494                                       const X86Subtarget &Subtarget) {
43495   // PSADBW is only supported on SSE2 and up.
43496   if (!Subtarget.hasSSE2())
43497     return SDValue();
43498 
43499   EVT ExtractVT = Extract->getValueType(0);
43500   // Verify the type we're extracting is either i32 or i64.
43501   // FIXME: Could support other types, but this is what we have coverage for.
43502   if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
43503     return SDValue();
43504 
43505   EVT VT = Extract->getOperand(0).getValueType();
43506   if (!isPowerOf2_32(VT.getVectorNumElements()))
43507     return SDValue();
43508 
43509   // Match shuffle + add pyramid.
43510   ISD::NodeType BinOp;
43511   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43512 
43513   // The operand is expected to be zero extended from i8
43514   // (verified in detectZextAbsDiff).
43515   // In order to convert to i64 and above, an additional any/zero/sign
43516   // extend is expected.
43517   // The zero extend from 32 bits has no mathematical effect on the result.
43518   // The sign extend is also effectively a zero extend
43519   // (the sign bit being extended is zero).
43520   // So it is correct to skip the sign/zero extend instruction.
43521   if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
43522                Root.getOpcode() == ISD::ZERO_EXTEND ||
43523                Root.getOpcode() == ISD::ANY_EXTEND))
43524     Root = Root.getOperand(0);
43525 
43526   // If there was a match, we want Root to be the ABS node at the root of an
43527   // abs-diff pattern.
43528   if (!Root || Root.getOpcode() != ISD::ABS)
43529     return SDValue();
43530 
43531   // Check whether we have an abs-diff pattern feeding into the ABS node.
43532   SDValue Zext0, Zext1;
43533   if (!detectZextAbsDiff(Root, Zext0, Zext1))
43534     return SDValue();
43535 
43536   // Create the SAD instruction.
43537   SDLoc DL(Extract);
43538   SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
43539 
43540   // If the original vector was wider than 8 elements, sum over the results
43541   // in the SAD vector.
43542   unsigned Stages = Log2_32(VT.getVectorNumElements());
43543   EVT SadVT = SAD.getValueType();
43544   if (Stages > 3) {
43545     unsigned SadElems = SadVT.getVectorNumElements();
43546 
43547     for(unsigned i = Stages - 3; i > 0; --i) {
43548       SmallVector<int, 16> Mask(SadElems, -1);
43549       for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43550         Mask[j] = MaskEnd + j;
43551 
43552       SDValue Shuffle =
43553           DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
43554       SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
43555     }
43556   }
43557 
43558   unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
43559   // Return the lowest ExtractSizeInBits bits.
43560   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43561                                SadVT.getSizeInBits() / ExtractSizeInBits);
43562   SAD = DAG.getBitcast(ResVT, SAD);
43563   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
43564                      Extract->getOperand(1));
43565 }
43566 
43567 // Attempt to peek through a target shuffle and extract the scalar from the
43568 // source.
43569 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
43570                                          TargetLowering::DAGCombinerInfo &DCI,
43571                                          const X86Subtarget &Subtarget) {
43572   if (DCI.isBeforeLegalizeOps())
43573     return SDValue();
43574 
43575   SDLoc dl(N);
43576   SDValue Src = N->getOperand(0);
43577   SDValue Idx = N->getOperand(1);
43578 
43579   EVT VT = N->getValueType(0);
43580   EVT SrcVT = Src.getValueType();
43581   EVT SrcSVT = SrcVT.getVectorElementType();
43582   unsigned SrcEltBits = SrcSVT.getSizeInBits();
43583   unsigned NumSrcElts = SrcVT.getVectorNumElements();
43584 
43585   // Don't attempt this for boolean mask vectors or unknown extraction indices.
43586   if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
43587     return SDValue();
43588 
43589   const APInt &IdxC = N->getConstantOperandAPInt(1);
43590   if (IdxC.uge(NumSrcElts))
43591     return SDValue();
43592 
43593   SDValue SrcBC = peekThroughBitcasts(Src);
43594 
43595   // Handle extract(bitcast(broadcast(scalar_value))).
43596   if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
43597     SDValue SrcOp = SrcBC.getOperand(0);
43598     EVT SrcOpVT = SrcOp.getValueType();
43599     if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
43600         (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
43601       unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
43602       unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
43603       // TODO support non-zero offsets.
43604       if (Offset == 0) {
43605         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
43606         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
43607         return SrcOp;
43608       }
43609     }
43610   }
43611 
43612   // If we're extracting a single element from a broadcast load and there are
43613   // no other users, just create a single load.
43614   if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
43615     auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
43616     unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
43617     if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
43618         VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
43619       SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
43620                                  MemIntr->getBasePtr(),
43621                                  MemIntr->getPointerInfo(),
43622                                  MemIntr->getOriginalAlign(),
43623                                  MemIntr->getMemOperand()->getFlags());
43624       DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
43625       return Load;
43626     }
43627   }
43628 
43629   // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
43630   // TODO: Move to DAGCombine?
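        // E.g. (i16 (extractelt (v8i16 (bitcast (v4i32 (scalar_to_vector (i32 s))))), 1))
        // becomes (i16 (trunc (i32 (srl s, 16)))) under the conditions checked below.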
43631   if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
43632       SrcBC.getValueType().isInteger() &&
43633       (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
43634       SrcBC.getScalarValueSizeInBits() ==
43635           SrcBC.getOperand(0).getValueSizeInBits()) {
43636     unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
43637     if (IdxC.ult(Scale)) {
43638       unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
43639       SDValue Scl = SrcBC.getOperand(0);
43640       EVT SclVT = Scl.getValueType();
43641       if (Offset) {
43642         Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
43643                           DAG.getShiftAmountConstant(Offset, SclVT, dl));
43644       }
43645       Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
43646       Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
43647       return Scl;
43648     }
43649   }
43650 
43651   // Handle extract(truncate(x)) for 0'th index.
43652   // TODO: Treat this as a faux shuffle?
43653   // TODO: When can we use this for general indices?
43654   if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
43655       (SrcVT.getSizeInBits() % 128) == 0) {
43656     Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
43657     MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
43658     return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
43659                        Idx);
43660   }
43661 
43662   // We can only legally extract other elements from 128-bit vectors, and only
43663   // in certain circumstances depending on the SSE level.
43664   // TODO: Investigate float/double extraction if it will be just stored.
43665   auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
43666                                                  unsigned Idx) {
43667     EVT VecSVT = VecVT.getScalarType();
43668     if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
43669         (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
43670          VecSVT == MVT::i64)) {
43671       unsigned EltSizeInBits = VecSVT.getSizeInBits();
43672       unsigned NumEltsPerLane = 128 / EltSizeInBits;
43673       unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
43674       unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
43675       VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
43676       Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
43677       Idx &= (NumEltsPerLane - 1);
43678     }
43679     if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
43680         ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
43681       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
43682                          DAG.getBitcast(VecVT, Vec),
43683                          DAG.getIntPtrConstant(Idx, dl));
43684     }
43685     if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
43686         (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
43687       unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
43688       return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
43689                          DAG.getTargetConstant(Idx, dl, MVT::i8));
43690     }
43691     return SDValue();
43692   };
43693 
43694   // Resolve the target shuffle inputs and mask.
43695   SmallVector<int, 16> Mask;
43696   SmallVector<SDValue, 2> Ops;
43697   if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
43698     return SDValue();
43699 
43700   // Shuffle inputs must be the same size as the result.
43701   if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
43702         return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
43703       }))
43704     return SDValue();
43705 
43706   // Attempt to narrow/widen the shuffle mask to the correct size.
43707   if (Mask.size() != NumSrcElts) {
43708     if ((NumSrcElts % Mask.size()) == 0) {
43709       SmallVector<int, 16> ScaledMask;
43710       int Scale = NumSrcElts / Mask.size();
43711       narrowShuffleMaskElts(Scale, Mask, ScaledMask);
43712       Mask = std::move(ScaledMask);
43713     } else if ((Mask.size() % NumSrcElts) == 0) {
43714       // Simplify Mask based on demanded element.
43715       int ExtractIdx = (int)IdxC.getZExtValue();
43716       int Scale = Mask.size() / NumSrcElts;
43717       int Lo = Scale * ExtractIdx;
43718       int Hi = Scale * (ExtractIdx + 1);
43719       for (int i = 0, e = (int)Mask.size(); i != e; ++i)
43720         if (i < Lo || Hi <= i)
43721           Mask[i] = SM_SentinelUndef;
43722 
43723       SmallVector<int, 16> WidenedMask;
43724       while (Mask.size() > NumSrcElts &&
43725              canWidenShuffleElements(Mask, WidenedMask))
43726         Mask = std::move(WidenedMask);
43727     }
43728   }
43729 
43730   // If narrowing/widening failed, see if we can extract+zero-extend.
43731   int ExtractIdx;
43732   EVT ExtractVT;
43733   if (Mask.size() == NumSrcElts) {
43734     ExtractIdx = Mask[IdxC.getZExtValue()];
43735     ExtractVT = SrcVT;
43736   } else {
43737     unsigned Scale = Mask.size() / NumSrcElts;
43738     if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
43739       return SDValue();
43740     unsigned ScaledIdx = Scale * IdxC.getZExtValue();
43741     if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
43742       return SDValue();
43743     ExtractIdx = Mask[ScaledIdx];
43744     EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
43745     ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
43746     assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
43747            "Failed to widen vector type");
43748   }
43749 
43750   // If the shuffle source element is undef/zero then we can just accept it.
43751   if (ExtractIdx == SM_SentinelUndef)
43752     return DAG.getUNDEF(VT);
43753 
43754   if (ExtractIdx == SM_SentinelZero)
43755     return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
43756                                 : DAG.getConstant(0, dl, VT);
43757 
43758   SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
43759   ExtractIdx = ExtractIdx % Mask.size();
43760   if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
43761     return DAG.getZExtOrTrunc(V, dl, VT);
43762 
43763   return SDValue();
43764 }
43765 
43766 /// Extracting a scalar FP value from vector element 0 is free, so extract each
43767 /// operand first, then perform the math as a scalar op.
43768 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
43769                                  const X86Subtarget &Subtarget) {
43770   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
43771   SDValue Vec = ExtElt->getOperand(0);
43772   SDValue Index = ExtElt->getOperand(1);
43773   EVT VT = ExtElt->getValueType(0);
43774   EVT VecVT = Vec.getValueType();
43775 
43776   // TODO: If this is a unary/expensive/expand op, allow extraction from a
43777   // non-zero element because the shuffle+scalar op will be cheaper?
43778   if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
43779     return SDValue();
43780 
43781   // Vector FP compares don't fit the pattern of FP math ops (propagate, not
43782   // extract, the condition code), so deal with those as a special case.
43783   if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
43784     EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
43785     if (OpVT != MVT::f32 && OpVT != MVT::f64)
43786       return SDValue();
43787 
43788     // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
43789     SDLoc DL(ExtElt);
43790     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43791                                Vec.getOperand(0), Index);
43792     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43793                                Vec.getOperand(1), Index);
43794     return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
43795   }
43796 
43797   if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
43798       VT != MVT::f64)
43799     return SDValue();
43800 
43801   // Vector FP selects don't fit the pattern of FP math ops (because the
43802   // condition has a different type and we have to change the opcode), so deal
43803   // with those here.
43804   // FIXME: This is restricted to pre type legalization by ensuring the setcc
43805   // has i1 elements. If we loosen this we need to convert vector bool to a
43806   // scalar bool.
43807   if (Vec.getOpcode() == ISD::VSELECT &&
43808       Vec.getOperand(0).getOpcode() == ISD::SETCC &&
43809       Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
43810       Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
43811     // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
43812     SDLoc DL(ExtElt);
43813     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
43814                                Vec.getOperand(0).getValueType().getScalarType(),
43815                                Vec.getOperand(0), Index);
43816     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43817                                Vec.getOperand(1), Index);
43818     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43819                                Vec.getOperand(2), Index);
43820     return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
43821   }
43822 
43823   // TODO: This switch could include FNEG and the x86-specific FP logic ops
43824   // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
43825   // missed load folding and fma+fneg combining.
43826   switch (Vec.getOpcode()) {
43827   case ISD::FMA: // Begin 3 operands
43828   case ISD::FMAD:
43829   case ISD::FADD: // Begin 2 operands
43830   case ISD::FSUB:
43831   case ISD::FMUL:
43832   case ISD::FDIV:
43833   case ISD::FREM:
43834   case ISD::FCOPYSIGN:
43835   case ISD::FMINNUM:
43836   case ISD::FMAXNUM:
43837   case ISD::FMINNUM_IEEE:
43838   case ISD::FMAXNUM_IEEE:
43839   case ISD::FMAXIMUM:
43840   case ISD::FMINIMUM:
43841   case X86ISD::FMAX:
43842   case X86ISD::FMIN:
43843   case ISD::FABS: // Begin 1 operand
43844   case ISD::FSQRT:
43845   case ISD::FRINT:
43846   case ISD::FCEIL:
43847   case ISD::FTRUNC:
43848   case ISD::FNEARBYINT:
43849   case ISD::FROUNDEVEN:
43850   case ISD::FROUND:
43851   case ISD::FFLOOR:
43852   case X86ISD::FRCP:
43853   case X86ISD::FRSQRT: {
43854     // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
43855     SDLoc DL(ExtElt);
43856     SmallVector<SDValue, 4> ExtOps;
43857     for (SDValue Op : Vec->ops())
43858       ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
43859     return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
43860   }
43861   default:
43862     return SDValue();
43863   }
43864   llvm_unreachable("All opcodes should return within switch");
43865 }
43866 
43867 /// Try to convert a vector reduction sequence composed of binops and shuffles
43868 /// into horizontal ops.
43869 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
43870                                      const X86Subtarget &Subtarget) {
43871   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
43872 
43873   // We need at least SSE2 to do anything here.
43874   if (!Subtarget.hasSSE2())
43875     return SDValue();
43876 
43877   ISD::NodeType Opc;
43878   SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
43879                                         {ISD::ADD, ISD::MUL, ISD::FADD}, true);
43880   if (!Rdx)
43881     return SDValue();
43882 
43883   SDValue Index = ExtElt->getOperand(1);
43884   assert(isNullConstant(Index) &&
43885          "Reduction doesn't end in an extract from index 0");
43886 
43887   EVT VT = ExtElt->getValueType(0);
43888   EVT VecVT = Rdx.getValueType();
43889   if (VecVT.getScalarType() != VT)
43890     return SDValue();
43891 
43892   SDLoc DL(ExtElt);
43893   unsigned NumElts = VecVT.getVectorNumElements();
43894   unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
43895 
43896   // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
43897   auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
43898     if (V.getValueType() == MVT::v4i8) {
43899       if (ZeroExtend && Subtarget.hasSSE41()) {
43900         V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
43901                         DAG.getConstant(0, DL, MVT::v4i32),
43902                         DAG.getBitcast(MVT::i32, V),
43903                         DAG.getIntPtrConstant(0, DL));
43904         return DAG.getBitcast(MVT::v16i8, V);
43905       }
43906       V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
43907                       ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
43908                                  : DAG.getUNDEF(MVT::v4i8));
43909     }
43910     return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
43911                        DAG.getUNDEF(MVT::v8i8));
43912   };
43913 
43914   // vXi8 mul reduction - promote to vXi16 mul reduction.
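        // The unpacks below interleave with undef, leaving each source byte in
        // the low half of an i16 lane; since the low 8 bits of an i16 multiply
        // depend only on the low 8 bits of its operands, the undef high halves
        // never leak into the final i8 result.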
43915   if (Opc == ISD::MUL) {
43916     if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
43917       return SDValue();
43918     if (VecVT.getSizeInBits() >= 128) {
43919       EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
43920       SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43921       SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43922       Lo = DAG.getBitcast(WideVT, Lo);
43923       Hi = DAG.getBitcast(WideVT, Hi);
43924       Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
43925       while (Rdx.getValueSizeInBits() > 128) {
43926         std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43927         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
43928       }
43929     } else {
43930       Rdx = WidenToV16I8(Rdx, false);
43931       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
43932       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
43933     }
43934     if (NumElts >= 8)
43935       Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43936                         DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43937                                              {4, 5, 6, 7, -1, -1, -1, -1}));
43938     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43939                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43940                                            {2, 3, -1, -1, -1, -1, -1, -1}));
43941     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
43942                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
43943                                            {1, -1, -1, -1, -1, -1, -1, -1}));
43944     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43945     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43946   }
43947 
43948   // vXi8 add reduction - sub-128-bit vector.
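        // PSADBW against an all-zeros vector simply sums the bytes, since for
        // unsigned bytes |x - 0| == x; the per-lane sums land in a v2i64.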
43949   if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
43950     Rdx = WidenToV16I8(Rdx, true);
43951     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
43952                       DAG.getConstant(0, DL, MVT::v16i8));
43953     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43954     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43955   }
43956 
43957   // Must be a >=128-bit vector with pow2 elements.
43958   if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
43959     return SDValue();
43960 
43961   // vXi8 add reduction - sum lo/hi halves then use PSADBW.
43962   if (VT == MVT::i8) {
43963     while (Rdx.getValueSizeInBits() > 128) {
43964       SDValue Lo, Hi;
43965       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
43966       VecVT = Lo.getValueType();
43967       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
43968     }
43969     assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
43970 
43971     SDValue Hi = DAG.getVectorShuffle(
43972         MVT::v16i8, DL, Rdx, Rdx,
43973         {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
43974     Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
43975     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
43976                       getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
43977     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
43978     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
43979   }
43980 
43981   // See if we can use vXi8 PSADBW add reduction for larger zext types.
43982   // If the source vector values are 0-255, then we can use PSADBW to
43983   // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
43984   // TODO: See if it's worth avoiding vXi16/i32 truncations?
43985   if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
43986       DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
43987       (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
43988        Subtarget.hasAVX512())) {
43989     if (Rdx.getValueType() == MVT::v8i16) {
43990       Rdx = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Rdx,
43991                         DAG.getUNDEF(MVT::v8i16));
43992     } else {
43993       EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
43994       Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
43995       if (ByteVT.getSizeInBits() < 128)
43996         Rdx = WidenToV16I8(Rdx, true);
43997     }
43998 
43999     // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44000     auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44001                             ArrayRef<SDValue> Ops) {
44002       MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44003       SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
44004       return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
44005     };
44006     MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
44007     Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
44008 
44009     // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
44010     while (Rdx.getValueSizeInBits() > 128) {
44011       SDValue Lo, Hi;
44012       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44013       VecVT = Lo.getValueType();
44014       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44015     }
44016     assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
44017 
44018     if (NumElts > 8) {
44019       SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
44020       Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
44021     }
44022 
44023     VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
44024     Rdx = DAG.getBitcast(VecVT, Rdx);
44025     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44026   }
44027 
44028   // Only use (F)HADD opcodes if they aren't microcoded or we're optimizing for size.
44029   if (!shouldUseHorizontalOp(true, DAG, Subtarget))
44030     return SDValue();
44031 
44032   unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
44033 
44034   // 256-bit horizontal instructions operate on 128-bit chunks rather than
44035   // across the whole vector, so we need an extract + hop preliminary stage.
44036   // This is the only step where the operands of the hop are not the same value.
44037   // TODO: We could extend this to handle 512-bit or even longer vectors.
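        // E.g. a v8i32 add reduction extracts the two 128-bit halves, does one
        // PHADDD of hi/lo, and then falls into the generic loop below for the
        // remaining log2(4) = 2 hadd steps before extracting element 0.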
44038   if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
44039       ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
44040     unsigned NumElts = VecVT.getVectorNumElements();
44041     SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
44042     SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
44043     Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
44044     VecVT = Rdx.getValueType();
44045   }
44046   if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
44047       !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
44048     return SDValue();
44049 
44050   // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
44051   unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
44052   for (unsigned i = 0; i != ReductionSteps; ++i)
44053     Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
44054 
44055   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44056 }
44057 
44058 /// Detect vector gather/scatter index generation and convert it from being a
44059 /// bunch of shuffles and extracts into a somewhat faster sequence.
44060 /// For i686, the best sequence is apparently storing the value and loading
44061 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
44062 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
44063                                        TargetLowering::DAGCombinerInfo &DCI,
44064                                        const X86Subtarget &Subtarget) {
44065   if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
44066     return NewOp;
44067 
44068   SDValue InputVector = N->getOperand(0);
44069   SDValue EltIdx = N->getOperand(1);
44070   auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
44071 
44072   EVT SrcVT = InputVector.getValueType();
44073   EVT VT = N->getValueType(0);
44074   SDLoc dl(InputVector);
44075   bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
44076   unsigned NumSrcElts = SrcVT.getVectorNumElements();
44077   unsigned NumEltBits = VT.getScalarSizeInBits();
44078   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44079 
44080   if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
44081     return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44082 
44083   // Integer Constant Folding.
44084   if (CIdx && VT.isInteger()) {
44085     APInt UndefVecElts;
44086     SmallVector<APInt, 16> EltBits;
44087     unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
44088     if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
44089                                       EltBits, true, false)) {
44090       uint64_t Idx = CIdx->getZExtValue();
44091       if (UndefVecElts[Idx])
44092         return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44093       return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
44094     }
44095 
44096     // Convert extract_element(bitcast(<X x i1>)) -> bitcast(extract_subvector()).
44097     // Improves lowering of bool masks in Rust, which splits them into a byte array.
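          // For example (illustrative, assuming v16i1 is legal on the target):
          //   (i8 extract_vector_elt (v2i8 bitcast (v16i1 X)), 1)
          //     --> (i8 bitcast (v8i1 extract_subvector X, 8))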
44098     if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
44099       SDValue Src = peekThroughBitcasts(InputVector);
44100       if (Src.getValueType().getScalarType() == MVT::i1 &&
44101           TLI.isTypeLegal(Src.getValueType())) {
44102         MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
44103         SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
44104             DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
44105         return DAG.getBitcast(VT, Sub);
44106       }
44107     }
44108   }
44109 
44110   if (IsPextr) {
44111     if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
44112                                  DCI))
44113       return SDValue(N, 0);
44114 
44115     // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
44116     if ((InputVector.getOpcode() == X86ISD::PINSRB ||
44117          InputVector.getOpcode() == X86ISD::PINSRW) &&
44118         InputVector.getOperand(2) == EltIdx) {
44119       assert(SrcVT == InputVector.getOperand(0).getValueType() &&
44120              "Vector type mismatch");
44121       SDValue Scl = InputVector.getOperand(1);
44122       Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
44123       return DAG.getZExtOrTrunc(Scl, dl, VT);
44124     }
44125 
44126     // TODO - Remove this once we can handle the implicit zero-extension of
44127     // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
44128     // combineBasicSADPattern.
44129     return SDValue();
44130   }
44131 
44132   // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
44133   if (VT == MVT::i64 && SrcVT == MVT::v1i64 &&
44134       InputVector.getOpcode() == ISD::BITCAST &&
44135       InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44136       isNullConstant(EltIdx) && InputVector.hasOneUse())
44137     return DAG.getBitcast(VT, InputVector);
44138 
44139   // Detect mmx to i32 conversion through a v2i32 elt extract.
44140   if (VT == MVT::i32 && SrcVT == MVT::v2i32 &&
44141       InputVector.getOpcode() == ISD::BITCAST &&
44142       InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44143       isNullConstant(EltIdx) && InputVector.hasOneUse())
44144     return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32,
44145                        InputVector.getOperand(0));
44146 
44147   // Check whether this extract is the root of a sum of absolute differences
44148   // pattern. This has to be done here because we really want it to happen
44149   // pre-legalization.
44150   if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
44151     return SAD;
44152 
44153   if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
44154     return VPDPBUSD;
44155 
44156   // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
44157   if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
44158     return Cmp;
44159 
44160   // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
44161   if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
44162     return MinMax;
44163 
44164   // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc.
44165   if (SDValue V = combineArithReduction(N, DAG, Subtarget))
44166     return V;
44167 
44168   if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
44169     return V;
44170 
44171   // Attempt to extract an i1 element by using MOVMSK to extract the signbits
44172   // and then testing the relevant element.
44173   //
44174   // Note that we only combine extracts on the *same* result number, i.e.
44175   //   t0 = merge_values a0, a1, a2, a3
44176   //   i1 = extract_vector_elt t0, Constant:i64<2>
44177   //   i1 = extract_vector_elt t0, Constant:i64<3>
44178   // but not
44179   //   i1 = extract_vector_elt t0:1, Constant:i64<2>
44180   // since the latter would need its own MOVMSK.
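        // For example (illustrative): for v16i1 X with constant indices 0 and 2,
        // a single i16 bitcast of X (lowered via MOVMSK) can feed both extracts:
        //   b0 = ((bc X) & 0x1) == 0x1
        //   b2 = ((bc X) & 0x4) == 0x4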
44181   if (SrcVT.getScalarType() == MVT::i1) {
44182     bool IsVar = !CIdx;
44183     SmallVector<SDNode *, 16> BoolExtracts;
44184     unsigned ResNo = InputVector.getResNo();
44185     auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
44186       if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44187           Use->getOperand(0).getResNo() == ResNo &&
44188           Use->getValueType(0) == MVT::i1) {
44189         BoolExtracts.push_back(Use);
44190         IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
44191         return true;
44192       }
44193       return false;
44194     };
44195     // TODO: Can we drop the oneuse check for constant extracts?
44196     if (all_of(InputVector->uses(), IsBoolExtract) &&
44197         (IsVar || BoolExtracts.size() > 1)) {
44198       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
44199       if (SDValue BC =
44200               combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
44201         for (SDNode *Use : BoolExtracts) {
44202           // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
44203           // Mask = 1 << MaskIdx
44204           SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
44205           SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
44206           SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
44207           SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
44208           Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
44209           DCI.CombineTo(Use, Res);
44210         }
44211         return SDValue(N, 0);
44212       }
44213     }
44214   }
44215 
44216   // If this extract is from a loaded vector value and will be used as an
44217   // integer, that requires a potentially expensive XMM -> GPR transfer.
44218   // Additionally, if we can convert to a scalar integer load, that will likely
44219   // be folded into a subsequent integer op.
44220   // Note: Unlike the related fold for this in DAGCombiner, this is not limited
44221   //       to a single-use of the loaded vector. For the reasons above, we
44222   //       expect this to be profitable even if it creates an extra load.
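        // For example (illustrative), after legalization and when profitable:
        //   (i32 extract_vector_elt (v4i32 load %p), 2) --> (i32 load %p + 8)
        // with the alignment reduced to commonAlignment(vector alignment, 8).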
44223   bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
44224     return Use->getOpcode() == ISD::STORE ||
44225            Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
44226            Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
44227   });
44228   auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
44229   if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
44230       SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
44231       !LikelyUsedAsVector && LoadVec->isSimple()) {
44232     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44233     SDValue NewPtr =
44234         TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
44235     unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
44236     MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
44237     Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
44238     SDValue Load =
44239         DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
44240                     LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
44241     DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
44242     return Load;
44243   }
44244 
44245   return SDValue();
44246 }
44247 
44248 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
44249 // This is more or less the reverse of combineBitcastvxi1.
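      // Illustrative sketch for (v8i8 zero_extend (v8i1 bitcast (i8 %b))):
      // broadcast %b to every byte lane, AND lane i with (1 << i), compare the
      // result for equality against the same bit mask, sign-extend the i1
      // compare, and (for ZERO_EXTEND/ANY_EXTEND) shift each lane right by 7 so
      // every element ends up as 0 or 1.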
44250 static SDValue combineToExtendBoolVectorInReg(
44251     unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
44252     TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
44253   if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
44254       Opcode != ISD::ANY_EXTEND)
44255     return SDValue();
44256   if (!DCI.isBeforeLegalizeOps())
44257     return SDValue();
44258   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
44259     return SDValue();
44260 
44261   EVT SVT = VT.getScalarType();
44262   EVT InSVT = N0.getValueType().getScalarType();
44263   unsigned EltSizeInBits = SVT.getSizeInBits();
44264 
44265   // We must be extending a bool vector (bit-cast from a scalar integer) to a
44266   // vector with legal integer element types.
44267   if (!VT.isVector())
44268     return SDValue();
44269   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
44270     return SDValue();
44271   if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
44272     return SDValue();
44273 
44274   SDValue N00 = N0.getOperand(0);
44275   EVT SclVT = N00.getValueType();
44276   if (!SclVT.isScalarInteger())
44277     return SDValue();
44278 
44279   SDValue Vec;
44280   SmallVector<int> ShuffleMask;
44281   unsigned NumElts = VT.getVectorNumElements();
44282   assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
44283 
44284   // Broadcast the scalar integer to the vector elements.
44285   if (NumElts > EltSizeInBits) {
44286     // If the scalar integer is greater than the vector element size, then we
44287     // must split it down into sub-sections for broadcasting. For example:
44288     //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
44289     //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
44290     assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
44291     unsigned Scale = NumElts / EltSizeInBits;
44292     EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
44293     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44294     Vec = DAG.getBitcast(VT, Vec);
44295 
44296     for (unsigned i = 0; i != Scale; ++i)
44297       ShuffleMask.append(EltSizeInBits, i);
44298     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44299   } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
44300              (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
44301     // If we have register broadcast instructions, use the scalar size as the
44302     // element type for the shuffle. Then cast to the wider element type. The
44303     // widened bits won't be used, and this might allow the use of a broadcast
44304     // load.
44305     assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
44306     unsigned Scale = EltSizeInBits / NumElts;
44307     EVT BroadcastVT =
44308         EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
44309     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44310     ShuffleMask.append(NumElts * Scale, 0);
44311     Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
44312     Vec = DAG.getBitcast(VT, Vec);
44313   } else {
44314     // For a smaller scalar integer, we can simply any-extend it to the vector
44315     // element size (we don't care about the upper bits) and broadcast it to all
44316     // elements.
44317     SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
44318     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
44319     ShuffleMask.append(NumElts, 0);
44320     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44321   }
44322 
44323   // Now, mask the relevant bit in each element.
44324   SmallVector<SDValue, 32> Bits;
44325   for (unsigned i = 0; i != NumElts; ++i) {
44326     int BitIdx = (i % EltSizeInBits);
44327     APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
44328     Bits.push_back(DAG.getConstant(Bit, DL, SVT));
44329   }
44330   SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
44331   Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
44332 
44333   // Compare against the bitmask and extend the result.
44334   EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
44335   Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
44336   Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
44337 
44338   // For SEXT, this is now done, otherwise shift the result down for
44339   // zero-extension.
44340   if (Opcode == ISD::SIGN_EXTEND)
44341     return Vec;
44342   return DAG.getNode(ISD::SRL, DL, VT, Vec,
44343                      DAG.getConstant(EltSizeInBits - 1, DL, VT));
44344 }
44345 
44346 /// If a vector select has an operand that is -1 or 0, try to simplify the
44347 /// select to a bitwise logic operation.
44348 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
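      /// For example (illustrative), once Cond is known to be a per-element
      /// all-ones/all-zeros mask of the right element width:
      ///   vselect Cond, -1, X --> or   Cond, X
      ///   vselect Cond,  X, 0 --> and  Cond, X
      ///   vselect Cond,  0, X --> andn Cond, X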
44349 static SDValue
44350 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
44351                                  TargetLowering::DAGCombinerInfo &DCI,
44352                                  const X86Subtarget &Subtarget) {
44353   SDValue Cond = N->getOperand(0);
44354   SDValue LHS = N->getOperand(1);
44355   SDValue RHS = N->getOperand(2);
44356   EVT VT = LHS.getValueType();
44357   EVT CondVT = Cond.getValueType();
44358   SDLoc DL(N);
44359   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44360 
44361   if (N->getOpcode() != ISD::VSELECT)
44362     return SDValue();
44363 
44364   assert(CondVT.isVector() && "Vector select expects a vector selector!");
44365 
44366   // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
44367   // TODO: Can we assert that both operands are not zeros (because that should
44368   //       get simplified at node creation time)?
44369   bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
44370   bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
44371 
44372   // If both inputs are 0/undef, create a complete zero vector.
44373   // FIXME: As noted above this should be handled by DAGCombiner/getNode.
44374   if (TValIsAllZeros && FValIsAllZeros) {
44375     if (VT.isFloatingPoint())
44376       return DAG.getConstantFP(0.0, DL, VT);
44377     return DAG.getConstant(0, DL, VT);
44378   }
44379 
44380   // To use the condition operand as a bitwise mask, it must have elements that
44381   // are the same size as the select elements. I.e., the condition operand must
44382   // have already been promoted from the IR select condition type <N x i1>.
44383   // Don't check if the types themselves are equal because that excludes
44384   // vector floating-point selects.
44385   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
44386     return SDValue();
44387 
44388   // Try to invert the condition if true value is not all 1s and false value is
44389   // not all 0s. Only do this if the condition has one use.
44390   bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
44391   if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
44392       // Check if the selector will be produced by CMPP*/PCMP*.
44393       Cond.getOpcode() == ISD::SETCC &&
44394       // Check if SETCC has already been promoted.
44395       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
44396           CondVT) {
44397     bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
44398 
44399     if (TValIsAllZeros || FValIsAllOnes) {
44400       SDValue CC = Cond.getOperand(2);
44401       ISD::CondCode NewCC = ISD::getSetCCInverse(
44402           cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
44403       Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
44404                           NewCC);
44405       std::swap(LHS, RHS);
44406       TValIsAllOnes = FValIsAllOnes;
44407       FValIsAllZeros = TValIsAllZeros;
44408     }
44409   }
44410 
44411   // Cond value must be 'sign splat' to be converted to a logical op.
44412   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
44413     return SDValue();
44414 
44415   // vselect Cond, 111..., 000... -> Cond
44416   if (TValIsAllOnes && FValIsAllZeros)
44417     return DAG.getBitcast(VT, Cond);
44418 
44419   if (!TLI.isTypeLegal(CondVT))
44420     return SDValue();
44421 
44422   // vselect Cond, 111..., X -> or Cond, X
44423   if (TValIsAllOnes) {
44424     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44425     SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
44426     return DAG.getBitcast(VT, Or);
44427   }
44428 
44429   // vselect Cond, X, 000... -> and Cond, X
44430   if (FValIsAllZeros) {
44431     SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
44432     SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
44433     return DAG.getBitcast(VT, And);
44434   }
44435 
44436   // vselect Cond, 000..., X -> andn Cond, X
44437   if (TValIsAllZeros) {
44438     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44439     SDValue AndN;
44440     // The canonical form differs for i1 vectors - X86ISD::ANDNP is not used.
44441     if (CondVT.getScalarType() == MVT::i1)
44442       AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
44443                          CastRHS);
44444     else
44445       AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
44446     return DAG.getBitcast(VT, AndN);
44447   }
44448 
44449   return SDValue();
44450 }
44451 
44452 /// If both arms of a vector select are concatenated vectors, split the select,
44453 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
44454 ///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
44455 ///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
44456 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
44457                                   const X86Subtarget &Subtarget) {
44458   unsigned Opcode = N->getOpcode();
44459   if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
44460     return SDValue();
44461 
44462   // TODO: Split 512-bit vectors too?
44463   EVT VT = N->getValueType(0);
44464   if (!VT.is256BitVector())
44465     return SDValue();
44466 
44467   // TODO: Split as long as any 2 of the 3 operands are concatenated?
44468   SDValue Cond = N->getOperand(0);
44469   SDValue TVal = N->getOperand(1);
44470   SDValue FVal = N->getOperand(2);
44471   if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
44472       !isFreeToSplitVector(TVal.getNode(), DAG) ||
44473       !isFreeToSplitVector(FVal.getNode(), DAG))
44474     return SDValue();
44475 
44476   auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
44477                             ArrayRef<SDValue> Ops) {
44478     return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
44479   };
44480   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
44481                           makeBlend, /*CheckBWI*/ false);
44482 }
44483 
44484 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
44485   SDValue Cond = N->getOperand(0);
44486   SDValue LHS = N->getOperand(1);
44487   SDValue RHS = N->getOperand(2);
44488   SDLoc DL(N);
44489 
44490   auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
44491   auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
44492   if (!TrueC || !FalseC)
44493     return SDValue();
44494 
44495   // Don't do this for crazy integer types.
44496   EVT VT = N->getValueType(0);
44497   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
44498     return SDValue();
44499 
44500   // We're going to use the condition bit in math or logic ops. We could allow
44501   // this with a wider condition value (post-legalization it becomes an i8),
44502   // but if nothing is creating selects that late, it doesn't matter.
44503   if (Cond.getValueType() != MVT::i1)
44504     return SDValue();
44505 
44506   // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
44507   // 3, 5, or 9 with i32/i64, so those get transformed too.
44508   // TODO: For constants that overflow or do not differ by power-of-2 or small
44509   // multiplier, convert to 'and' + 'add'.
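        // For example (illustrative): select i1 %c, i32 11, i32 3 becomes
        //   t = zext i1 %c to i32
        //   r = (t * 8) + 3        ; 8 == 11 - 3, so the multiply is a shift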
44510   const APInt &TrueVal = TrueC->getAPIntValue();
44511   const APInt &FalseVal = FalseC->getAPIntValue();
44512 
44513   // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
44514   if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
44515       Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
44516     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44517     if (CC == ISD::SETEQ || CC == ISD::SETNE)
44518       return SDValue();
44519   }
44520 
44521   bool OV;
44522   APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
44523   if (OV)
44524     return SDValue();
44525 
44526   APInt AbsDiff = Diff.abs();
44527   if (AbsDiff.isPowerOf2() ||
44528       ((VT == MVT::i32 || VT == MVT::i64) &&
44529        (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
44530 
44531     // We need a positive multiplier constant for shift/LEA codegen. The 'not'
44532     // of the condition can usually be folded into a compare predicate, but even
44533     // without that, the sequence should be cheaper than a CMOV alternative.
44534     if (TrueVal.slt(FalseVal)) {
44535       Cond = DAG.getNOT(DL, Cond, MVT::i1);
44536       std::swap(TrueC, FalseC);
44537     }
44538 
44539     // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
44540     SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
44541 
44542     // Multiply condition by the difference if non-one.
44543     if (!AbsDiff.isOne())
44544       R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
44545 
44546     // Add the base if non-zero.
44547     if (!FalseC->isZero())
44548       R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
44549 
44550     return R;
44551   }
44552 
44553   return SDValue();
44554 }
44555 
44556 /// If this is a *dynamic* select (non-constant condition) and we can match
44557 /// this node with one of the variable blend instructions, restructure the
44558 /// condition so that blends can use the high (sign) bit of each element.
44559 /// This function will also call SimplifyDemandedBits on already created
44560 /// BLENDV to perform additional simplifications.
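      /// For example (illustrative): PBLENDVB/BLENDVPS/BLENDVPD only test the
      /// sign bit of each condition element, so only APInt::getSignMask(BitWidth)
      /// is demanded from Cond here.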
44561 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
44562                                       TargetLowering::DAGCombinerInfo &DCI,
44563                                       const X86Subtarget &Subtarget) {
44564   SDValue Cond = N->getOperand(0);
44565   if ((N->getOpcode() != ISD::VSELECT &&
44566        N->getOpcode() != X86ISD::BLENDV) ||
44567       ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
44568     return SDValue();
44569 
44570   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44571   unsigned BitWidth = Cond.getScalarValueSizeInBits();
44572   EVT VT = N->getValueType(0);
44573 
44574   // We can only handle the cases where VSELECT is directly legal on the
44575   // subtarget. We custom lower VSELECT nodes with constant conditions and
44576   // this makes it hard to see whether a dynamic VSELECT will correctly
44577   // lower, so we both check the operation's status and explicitly handle the
44578   // cases where a *dynamic* blend will fail even though a constant-condition
44579   // blend could be custom lowered.
44580   // FIXME: We should find a better way to handle this class of problems.
44581   // Potentially, we should combine constant-condition vselect nodes
44582   // pre-legalization into shuffles and not mark as many types as custom
44583   // lowered.
44584   if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
44585     return SDValue();
44586   // FIXME: We don't support i16-element blends currently. We could and
44587   // should support them by making *all* the bits in the condition be set
44588   // rather than just the high bit and using an i8-element blend.
44589   if (VT.getVectorElementType() == MVT::i16)
44590     return SDValue();
44591   // Dynamic blending was only available from SSE4.1 onward.
44592   if (VT.is128BitVector() && !Subtarget.hasSSE41())
44593     return SDValue();
44594   // Byte blends are only available in AVX2.
44595   if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
44596     return SDValue();
44597   // There are no 512-bit blend instructions that use sign bits.
44598   if (VT.is512BitVector())
44599     return SDValue();
44600 
44601   // Don't optimize before the condition has been transformed to a legal type
44602   // and don't ever optimize vector selects that map to AVX512 mask-registers.
44603   if (BitWidth < 8 || BitWidth > 64)
44604     return SDValue();
44605 
44606   auto OnlyUsedAsSelectCond = [](SDValue Cond) {
44607     for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
44608          UI != UE; ++UI)
44609       if ((UI->getOpcode() != ISD::VSELECT &&
44610            UI->getOpcode() != X86ISD::BLENDV) ||
44611           UI.getOperandNo() != 0)
44612         return false;
44613 
44614     return true;
44615   };
44616 
44617   APInt DemandedBits(APInt::getSignMask(BitWidth));
44618 
44619   if (OnlyUsedAsSelectCond(Cond)) {
44620     KnownBits Known;
44621     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
44622                                           !DCI.isBeforeLegalizeOps());
44623     if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
44624       return SDValue();
44625 
44626     // If we changed the computation somewhere in the DAG, this change will
44627     // affect all users of Cond. Update all the nodes so that we do not use
44628     // the generic VSELECT anymore. Otherwise, we may perform wrong
44629     // optimizations as we messed with the actual expectation for the vector
44630     // boolean values.
44631     for (SDNode *U : Cond->uses()) {
44632       if (U->getOpcode() == X86ISD::BLENDV)
44633         continue;
44634 
44635       SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
44636                                Cond, U->getOperand(1), U->getOperand(2));
44637       DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
44638       DCI.AddToWorklist(U);
44639     }
44640     DCI.CommitTargetLoweringOpt(TLO);
44641     return SDValue(N, 0);
44642   }
44643 
44644   // Otherwise we can still at least try to simplify multiple use bits.
44645   if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
44646     return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
44647                        N->getOperand(1), N->getOperand(2));
44648 
44649   return SDValue();
44650 }
44651 
44652 // Try to match:
44653 //   (or (and M, (sub 0, X)), (pandn M, X))
44654 // which is a special case of:
44655 //   (select M, (sub 0, X), X)
44656 // Per:
44657 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
44658 // We know that, if fNegate is 0 or 1:
44659 //   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
44660 //
44661 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
44662 //   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
44663 //   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
44664 // This lets us transform our vselect to:
44665 //   (add (xor X, M), (and M, 1))
44666 // And further to:
44667 //   (sub (xor X, M), M)
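      // Scalar sanity check of the identity (illustrative only, M is 0 or -1):
      //   int32_t cneg(int32_t M, int32_t X) { return (X ^ M) - M; } // M ? -X : X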
44668 static SDValue combineLogicBlendIntoConditionalNegate(
44669     EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
44670     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
44671   EVT MaskVT = Mask.getValueType();
44672   assert(MaskVT.isInteger() &&
44673          DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
44674          "Mask must be zero/all-bits");
44675 
44676   if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
44677     return SDValue();
44678   if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
44679     return SDValue();
44680 
44681   auto IsNegV = [](SDNode *N, SDValue V) {
44682     return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
44683            ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
44684   };
44685 
44686   SDValue V;
44687   if (IsNegV(Y.getNode(), X))
44688     V = X;
44689   else if (IsNegV(X.getNode(), Y))
44690     V = Y;
44691   else
44692     return SDValue();
44693 
44694   SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
44695   SDValue SubOp2 = Mask;
44696 
44697   // If the negate was on the false side of the select, then
44698   // the operands of the SUB need to be swapped. PR 27251.
44699   // This is because the pattern being matched above is
44700   // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
44701   // but if the pattern matched was
44702   // (vselect M, X, (sub 0, X)), that is really the negation of the pattern
44703   // above, -(vselect M, (sub 0, X), X), and therefore the replacement
44704   // pattern also needs to be a negation of the replacement pattern above.
44705   // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
44706   // sub accomplishes the negation of the replacement pattern.
44707   if (V == Y)
44708     std::swap(SubOp1, SubOp2);
44709 
44710   SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
44711   return DAG.getBitcast(VT, Res);
44712 }
44713 
44714 static SDValue commuteSelect(SDNode *N, SelectionDAG &DAG,
44715                              const X86Subtarget &Subtarget) {
44716   if (!Subtarget.hasAVX512())
44717     return SDValue();
44718   if (N->getOpcode() != ISD::VSELECT)
44719     return SDValue();
44720 
44721   SDLoc DL(N);
44722   SDValue Cond = N->getOperand(0);
44723   SDValue LHS = N->getOperand(1);
44724   SDValue RHS = N->getOperand(2);
44725 
44726   if (canCombineAsMaskOperation(LHS, Subtarget))
44727     return SDValue();
44728 
44729   if (!canCombineAsMaskOperation(RHS, Subtarget))
44730     return SDValue();
44731 
44732   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
44733     return SDValue();
44734 
44735   // Commute LHS and RHS so that a mask instruction can be selected.
44736   // (vselect M, L, R) -> (vselect ~M, R, L)
44737   ISD::CondCode NewCC =
44738       ISD::getSetCCInverse(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
44739                            Cond.getOperand(0).getValueType());
44740   Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0),
44741                       Cond.getOperand(1), NewCC);
44742   return DAG.getSelect(DL, LHS.getValueType(), Cond, RHS, LHS);
44743 }
44744 
44745 /// Do target-specific dag combines on SELECT and VSELECT nodes.
44746 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
44747                              TargetLowering::DAGCombinerInfo &DCI,
44748                              const X86Subtarget &Subtarget) {
44749   SDLoc DL(N);
44750   SDValue Cond = N->getOperand(0);
44751   SDValue LHS = N->getOperand(1);
44752   SDValue RHS = N->getOperand(2);
44753 
44754   // Try simplification again because we use this function to optimize
44755   // BLENDV nodes that are not handled by the generic combiner.
44756   if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
44757     return V;
44758 
44759   // When AVX512 is available, the LHS operand of a select instruction can
44760   // be folded with a mask instruction, while the RHS operand can't. Commute
44761   // the LHS and RHS of the select instruction to create the opportunity for
44762   // folding.
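        // For example (illustrative), with an AVX512 mask %m and a maskable RHS:
        //   (vselect %m, %x, (add %y, %z)) --> (vselect (not %m), (add %y, %z), %x)
        // so the add can be selected as a masked instruction with %x passed through.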
44763   if (SDValue V = commuteSelect(N, DAG, Subtarget))
44764     return V;
44765 
44766   EVT VT = LHS.getValueType();
44767   EVT CondVT = Cond.getValueType();
44768   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44769   bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
44770 
44771   // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
44772   // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
44773   // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
44774   if (CondVT.isVector() && CondVT.isInteger() &&
44775       CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
44776       (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
44777       DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
44778     if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
44779                                                            DL, DAG, Subtarget))
44780       return V;
44781 
44782   // Convert vselects with constant condition into shuffles.
44783   if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
44784       (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
44785     SmallVector<int, 64> Mask;
44786     if (createShuffleMaskFromVSELECT(Mask, Cond,
44787                                      N->getOpcode() == X86ISD::BLENDV))
44788       return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
44789   }
44790 
44791   // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
44792   // by forcing the unselected elements to zero.
44793   // TODO: Can we handle more shuffles with this?
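        // For example (illustrative), for v16i8: in each lane where Cond selects
        // LHS, RHSMask is forced to 0x80 (PSHUFB's zeroing index) and LHSMask is
        // kept, and vice versa, so the final OR reassembles the blended result.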
44794   if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
44795       LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
44796       LHS.hasOneUse() && RHS.hasOneUse()) {
44797     MVT SimpleVT = VT.getSimpleVT();
44798     SmallVector<SDValue, 1> LHSOps, RHSOps;
44799     SmallVector<int, 64> LHSMask, RHSMask, CondMask;
44800     if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
44801         getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
44802         getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
44803       int NumElts = VT.getVectorNumElements();
44804       for (int i = 0; i != NumElts; ++i) {
44805         // getConstVector sets negative shuffle mask values as undef, so ensure
44806         // we hardcode SM_SentinelZero values to zero (0x80).
44807         if (CondMask[i] < NumElts) {
44808           LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
44809           RHSMask[i] = 0x80;
44810         } else {
44811           LHSMask[i] = 0x80;
44812           RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
44813         }
44814       }
44815       LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
44816                         getConstVector(LHSMask, SimpleVT, DAG, DL, true));
44817       RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
44818                         getConstVector(RHSMask, SimpleVT, DAG, DL, true));
44819       return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
44820     }
44821   }
44822 
44823   // If we have SSE[12] support, try to form min/max nodes. SSE min/max
44824   // instructions match the semantics of the common C idiom x<y?x:y but not
44825   // x<=y?x:y, because of how they handle negative zero (which can be
44826   // ignored in unsafe-math mode).
44827   // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
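        // For example (illustrative): MINSS(a, b) returns a when a < b and b
        // otherwise (including NaN and +/-0.0 ties), which is exactly the C
        // expression a < b ? a : b, but not a <= b ? a : b.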
44828   if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
44829       VT != MVT::f80 && VT != MVT::f128 && !isSoftF16(VT, Subtarget) &&
44830       (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
44831       (Subtarget.hasSSE2() ||
44832        (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
44833     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44834 
44835     unsigned Opcode = 0;
44836     // Check for x CC y ? x : y.
44837     if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
44838         DAG.isEqualTo(RHS, Cond.getOperand(1))) {
44839       switch (CC) {
44840       default: break;
44841       case ISD::SETULT:
44842         // Converting this to a min would handle NaNs incorrectly, and swapping
44843         // the operands would cause it to handle comparisons between positive
44844         // and negative zero incorrectly.
44845         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44846           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44847               !(DAG.isKnownNeverZeroFloat(LHS) ||
44848                 DAG.isKnownNeverZeroFloat(RHS)))
44849             break;
44850           std::swap(LHS, RHS);
44851         }
44852         Opcode = X86ISD::FMIN;
44853         break;
44854       case ISD::SETOLE:
44855         // Converting this to a min would handle comparisons between positive
44856         // and negative zero incorrectly.
44857         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44858             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44859           break;
44860         Opcode = X86ISD::FMIN;
44861         break;
44862       case ISD::SETULE:
44863         // Converting this to a min would handle both negative zeros and NaNs
44864         // incorrectly, but we can swap the operands to fix both.
44865         std::swap(LHS, RHS);
44866         [[fallthrough]];
44867       case ISD::SETOLT:
44868       case ISD::SETLT:
44869       case ISD::SETLE:
44870         Opcode = X86ISD::FMIN;
44871         break;
44872 
44873       case ISD::SETOGE:
44874         // Converting this to a max would handle comparisons between positive
44875         // and negative zero incorrectly.
44876         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44877             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44878           break;
44879         Opcode = X86ISD::FMAX;
44880         break;
44881       case ISD::SETUGT:
44882         // Converting this to a max would handle NaNs incorrectly, and swapping
44883         // the operands would cause it to handle comparisons between positive
44884         // and negative zero incorrectly.
44885         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44886           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44887               !(DAG.isKnownNeverZeroFloat(LHS) ||
44888                 DAG.isKnownNeverZeroFloat(RHS)))
44889             break;
44890           std::swap(LHS, RHS);
44891         }
44892         Opcode = X86ISD::FMAX;
44893         break;
44894       case ISD::SETUGE:
44895         // Converting this to a max would handle both negative zeros and NaNs
44896         // incorrectly, but we can swap the operands to fix both.
44897         std::swap(LHS, RHS);
44898         [[fallthrough]];
44899       case ISD::SETOGT:
44900       case ISD::SETGT:
44901       case ISD::SETGE:
44902         Opcode = X86ISD::FMAX;
44903         break;
44904       }
44905     // Check for x CC y ? y : x -- a min/max with reversed arms.
44906     } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
44907                DAG.isEqualTo(RHS, Cond.getOperand(0))) {
44908       switch (CC) {
44909       default: break;
44910       case ISD::SETOGE:
44911         // Converting this to a min would handle comparisons between positive
44912         // and negative zero incorrectly, and swapping the operands would
44913         // cause it to handle NaNs incorrectly.
44914         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44915             !(DAG.isKnownNeverZeroFloat(LHS) ||
44916               DAG.isKnownNeverZeroFloat(RHS))) {
44917           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44918             break;
44919           std::swap(LHS, RHS);
44920         }
44921         Opcode = X86ISD::FMIN;
44922         break;
44923       case ISD::SETUGT:
44924         // Converting this to a min would handle NaNs incorrectly.
44925         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44926           break;
44927         Opcode = X86ISD::FMIN;
44928         break;
44929       case ISD::SETUGE:
44930         // Converting this to a min would handle both negative zeros and NaNs
44931         // incorrectly, but we can swap the operands to fix both.
44932         std::swap(LHS, RHS);
44933         [[fallthrough]];
44934       case ISD::SETOGT:
44935       case ISD::SETGT:
44936       case ISD::SETGE:
44937         Opcode = X86ISD::FMIN;
44938         break;
44939 
44940       case ISD::SETULT:
44941         // Converting this to a max would handle NaNs incorrectly.
44942         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44943           break;
44944         Opcode = X86ISD::FMAX;
44945         break;
44946       case ISD::SETOLE:
44947         // Converting this to a max would handle comparisons between positive
44948         // and negative zero incorrectly, and swapping the operands would
44949         // cause it to handle NaNs incorrectly.
44950         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44951             !DAG.isKnownNeverZeroFloat(LHS) &&
44952             !DAG.isKnownNeverZeroFloat(RHS)) {
44953           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44954             break;
44955           std::swap(LHS, RHS);
44956         }
44957         Opcode = X86ISD::FMAX;
44958         break;
44959       case ISD::SETULE:
44960         // Converting this to a max would handle both negative zeros and NaNs
44961         // incorrectly, but we can swap the operands to fix both.
44962         std::swap(LHS, RHS);
44963         [[fallthrough]];
44964       case ISD::SETOLT:
44965       case ISD::SETLT:
44966       case ISD::SETLE:
44967         Opcode = X86ISD::FMAX;
44968         break;
44969       }
44970     }
44971 
44972     if (Opcode)
44973       return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
44974   }
44975 
44976   // Some mask scalar intrinsics rely on checking if only one bit is set
44977   // and implement it in C code like this:
44978   // A[0] = (U & 1) ? A[0] : W[0];
44979   // This creates some redundant instructions that break pattern matching.
44980   // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
44981   if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
44982       Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
44983     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44984     SDValue AndNode = Cond.getOperand(0);
44985     if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
44986         isNullConstant(Cond.getOperand(1)) &&
44987         isOneConstant(AndNode.getOperand(1))) {
44988       // LHS and RHS swapped due to
44989       // setcc outputting 1 when AND resulted in 0 and vice versa.
44990       AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
44991       return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
44992     }
44993   }
44994 
44995   // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
44996   // lowering on KNL. In this case we convert it to
44997   // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
44998   // The same applies to all vectors of i8 and i16 without BWI.
44999   // Make sure we extend these even before type legalization gets a chance to
45000   // split wide vectors.
45001   // Since SKX, these selects have a proper lowering.
45002   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
45003       CondVT.getVectorElementType() == MVT::i1 &&
45004       (VT.getVectorElementType() == MVT::i8 ||
45005        VT.getVectorElementType() == MVT::i16)) {
45006     Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
45007     return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
45008   }
45009 
45010   // AVX512 - Extend select with zero to merge with target shuffle.
45011   // select(mask, extract_subvector(shuffle(x)), zero) -->
45012   // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
45013   // TODO - support non target shuffles as well.
45014   if (Subtarget.hasAVX512() && CondVT.isVector() &&
45015       CondVT.getVectorElementType() == MVT::i1) {
45016     auto SelectableOp = [&TLI](SDValue Op) {
45017       return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45018              isTargetShuffle(Op.getOperand(0).getOpcode()) &&
45019              isNullConstant(Op.getOperand(1)) &&
45020              TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
45021              Op.hasOneUse() && Op.getOperand(0).hasOneUse();
45022     };
45023 
45024     bool SelectableLHS = SelectableOp(LHS);
45025     bool SelectableRHS = SelectableOp(RHS);
45026     bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
45027     bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
45028 
45029     if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
45030       EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
45031                                 : RHS.getOperand(0).getValueType();
45032       EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
45033       LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
45034                             VT.getSizeInBits());
45035       RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
45036                             VT.getSizeInBits());
45037       Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
45038                          DAG.getUNDEF(SrcCondVT), Cond,
45039                          DAG.getIntPtrConstant(0, DL));
45040       SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
45041       return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
45042     }
45043   }
45044 
45045   if (SDValue V = combineSelectOfTwoConstants(N, DAG))
45046     return V;
45047 
45048   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
45049       Cond.hasOneUse()) {
45050     EVT CondVT = Cond.getValueType();
45051     SDValue Cond0 = Cond.getOperand(0);
45052     SDValue Cond1 = Cond.getOperand(1);
45053     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45054 
45055     // Canonicalize min/max:
45056     // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
45057     // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
45058     // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
45059     // the need for an extra compare against zero. e.g.
45060     // (a - b) > 0 ? (a - b) : 0 -> (a - b) >= 0 ? (a - b) : 0
45061     // subl   %esi, %edi
45062     // testl  %edi, %edi
45063     // movl   $0, %eax
45064     // cmovgl %edi, %eax
45065     // =>
45066     // xorl   %eax, %eax
45067     // subl   %esi, %edi
45068     // cmovsl %eax, %edi
45069     //
45070     // We can also canonicalize
45071     //  (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
45072     //  (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
45073     // This allows the use of a test instruction for the compare.
45074     if (LHS == Cond0 && RHS == Cond1) {
45075       if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
45076           (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
45077         ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
45078         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45079         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45080       }
45081       if (CC == ISD::SETUGT && isOneConstant(RHS)) {
45082         ISD::CondCode NewCC = ISD::SETUGE;
45083         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45084         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45085       }
45086     }
45087 
45088     // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
45089     // fold eq + gt/lt nested selects into ge/le selects
45090     // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
45091     // --> (select (cmpuge Cond0, Cond1), LHS, Y)
45092     // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
45093     // --> (select (cmpsle Cond0, Cond1), LHS, Y)
45094     // .. etc ..
45095     if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
45096         RHS.getOperand(0).getOpcode() == ISD::SETCC) {
45097       SDValue InnerSetCC = RHS.getOperand(0);
45098       ISD::CondCode InnerCC =
45099           cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
45100       if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
45101           Cond0 == InnerSetCC.getOperand(0) &&
45102           Cond1 == InnerSetCC.getOperand(1)) {
45103         ISD::CondCode NewCC;
45104         switch (CC == ISD::SETEQ ? InnerCC : CC) {
45105         case ISD::SETGT:  NewCC = ISD::SETGE; break;
45106         case ISD::SETLT:  NewCC = ISD::SETLE; break;
45107         case ISD::SETUGT: NewCC = ISD::SETUGE; break;
45108         case ISD::SETULT: NewCC = ISD::SETULE; break;
45109         default: NewCC = ISD::SETCC_INVALID; break;
45110         }
45111         if (NewCC != ISD::SETCC_INVALID) {
45112           Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
45113           return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
45114         }
45115       }
45116     }
45117   }
45118 
45119   // Check if the first operand is all zeros and Cond type is vXi1.
45120   // If this is an avx512 target we can improve the use of zero masking by
45121   // swapping the operands and inverting the condition.
45122   if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
45123       Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
45124       ISD::isBuildVectorAllZeros(LHS.getNode()) &&
45125       !ISD::isBuildVectorAllZeros(RHS.getNode())) {
45126     // Invert the cond to not(cond) : xor(op,allones)=not(op)
45127     SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
45128     // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
45129     return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
45130   }
45131 
45132   // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
45133   // get split by legalization.
45134   if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
45135       CondVT.getVectorElementType() == MVT::i1 &&
45136       TLI.isTypeLegal(VT.getScalarType())) {
45137     EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
45138     if (SDValue ExtCond = combineToExtendBoolVectorInReg(
45139             ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
45140       ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
45141       return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
45142     }
45143   }
45144 
45145   // Early exit check
45146   if (!TLI.isTypeLegal(VT) || isSoftF16(VT, Subtarget))
45147     return SDValue();
45148 
45149   if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
45150     return V;
45151 
45152   if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
45153     return V;
45154 
45155   if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
45156     return V;
45157 
45158   // select(~Cond, X, Y) -> select(Cond, Y, X)
45159   if (CondVT.getScalarType() != MVT::i1) {
45160     if (SDValue CondNot = IsNOT(Cond, DAG))
45161       return DAG.getNode(N->getOpcode(), DL, VT,
45162                          DAG.getBitcast(CondVT, CondNot), RHS, LHS);
45163 
45164     // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
45165     // signbit.
45166     if (Cond.getOpcode() == X86ISD::PCMPGT &&
45167         ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode()) &&
45168         Cond.hasOneUse()) {
45169       Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
45170                          DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
45171       return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
45172     }
45173   }
45174 
45175   // Try to optimize vXi1 selects if both operands are either all constants or
45176   // bitcasts from scalar integer type. In that case we can convert the operands
45177   // to integer and use an integer select which will be converted to a CMOV.
45178   // We need to take a little bit of care to avoid creating an i64 type after
45179   // type legalization.
45180   if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
45181       VT.getVectorElementType() == MVT::i1 &&
45182       (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
45183     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
45184     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(IntVT)) {
45185       bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
45186       bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
45187 
45188       if ((LHSIsConst || (LHS.getOpcode() == ISD::BITCAST &&
45189                           LHS.getOperand(0).getValueType() == IntVT)) &&
45190           (RHSIsConst || (RHS.getOpcode() == ISD::BITCAST &&
45191                           RHS.getOperand(0).getValueType() == IntVT))) {
45192         if (LHSIsConst)
45193           LHS = combinevXi1ConstantToInteger(LHS, DAG);
45194         else
45195           LHS = LHS.getOperand(0);
45196 
45197         if (RHSIsConst)
45198           RHS = combinevXi1ConstantToInteger(RHS, DAG);
45199         else
45200           RHS = RHS.getOperand(0);
45201 
45202         SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
45203         return DAG.getBitcast(VT, Select);
45204       }
45205     }
45206   }
45207 
45208   // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
45209   // single bits, then invert the predicate and swap the select operands.
45210   // This can lower using a vector shift bit-hack rather than mask and compare.
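        // For example (illustrative), for v4i32 with mask <1, 2, 4, 8>: shift each
        // lane left by <31, 30, 29, 28> so its mask bit lands in the sign bit, then
        // select on "shifted < 0" with the select operands swapped.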
45211   if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
45212       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
45213       Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
45214       Cond.getOperand(0).getOpcode() == ISD::AND &&
45215       isNullOrNullSplat(Cond.getOperand(1)) &&
45216       cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
45217       Cond.getOperand(0).getValueType() == VT) {
45218     // The 'and' mask must be composed of power-of-2 constants.
45219     SDValue And = Cond.getOperand(0);
45220     auto *C = isConstOrConstSplat(And.getOperand(1));
45221     if (C && C->getAPIntValue().isPowerOf2()) {
45222       // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
45223       SDValue NotCond =
45224           DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
45225       return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
45226     }
45227 
45228     // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
45229     // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
45230     // 16-bit lacks a proper blendv.
45231     unsigned EltBitWidth = VT.getScalarSizeInBits();
45232     bool CanShiftBlend =
45233         TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
45234                                 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
45235                                 (Subtarget.hasXOP()));
45236     if (CanShiftBlend &&
45237         ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
45238           return C->getAPIntValue().isPowerOf2();
45239         })) {
45240       // Create a left-shift constant to get the mask bits over to the sign-bit.
45241       SDValue Mask = And.getOperand(1);
45242       SmallVector<int, 32> ShlVals;
45243       for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
45244         auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
45245         ShlVals.push_back(EltBitWidth - 1 -
45246                           MaskVal->getAPIntValue().exactLogBase2());
45247       }
45248       // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
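      // Worked example (illustrative, not in the original comments): for
      // v4i32 with mask C = <1,2,4,8>, ShlVals is <31,30,29,28>, so each
      // tested bit is moved up to its element's sign bit before the signed
      // less-than-zero compare below.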
45249       SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
45250       SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
45251       SDValue NewCond =
45252           DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
45253       return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
45254     }
45255   }
45256 
45257   return SDValue();
45258 }
45259 
45260 /// Combine:
45261 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
45262 /// to:
45263 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
45264 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
45265 /// Note that this is only legal for some op/cc combinations.
45266 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
45267                                        SelectionDAG &DAG,
45268                                        const X86Subtarget &Subtarget) {
45269   // This combine only operates on CMP-like nodes.
45270   if (!(Cmp.getOpcode() == X86ISD::CMP ||
45271         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45272     return SDValue();
45273 
45274   // Can't replace the cmp if it has more uses than the one we're looking at.
45275   // FIXME: We would like to be able to handle this, but would need to make sure
45276   // all uses were updated.
45277   if (!Cmp.hasOneUse())
45278     return SDValue();
45279 
45280   // This only applies to variations of the common case:
45281   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
45282   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
45283   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
45284   //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
45285   // Using the proper condcodes (see below), overflow is checked for.
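  // Illustrative example (not in the original comments): for
  //   %old = atomicrmw add ptr %p, i32 1
  //   %c   = icmp slt i32 %old, 0
  // we have old < 0 iff old + 1 <= 0, so COND_S on the fetched value becomes
  // COND_LE on the flags of the LOCK ADD (the condition-code handling below
  // accounts for the overflow corner cases).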
45286 
45287   // FIXME: We can generalize both constraints:
45288   // - XOR/OR/AND (if they were made to survive AtomicExpand)
45289   // - LHS != 1
45290   // if the result is compared.
45291 
45292   SDValue CmpLHS = Cmp.getOperand(0);
45293   SDValue CmpRHS = Cmp.getOperand(1);
45294   EVT CmpVT = CmpLHS.getValueType();
45295 
45296   if (!CmpLHS.hasOneUse())
45297     return SDValue();
45298 
45299   unsigned Opc = CmpLHS.getOpcode();
45300   if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
45301     return SDValue();
45302 
45303   SDValue OpRHS = CmpLHS.getOperand(2);
45304   auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
45305   if (!OpRHSC)
45306     return SDValue();
45307 
45308   APInt Addend = OpRHSC->getAPIntValue();
45309   if (Opc == ISD::ATOMIC_LOAD_SUB)
45310     Addend = -Addend;
45311 
45312   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
45313   if (!CmpRHSC)
45314     return SDValue();
45315 
45316   APInt Comparison = CmpRHSC->getAPIntValue();
45317   APInt NegAddend = -Addend;
45318 
45319   // See if we can adjust the CC to make the comparison match the negated
45320   // addend.
45321   if (Comparison != NegAddend) {
45322     APInt IncComparison = Comparison + 1;
45323     if (IncComparison == NegAddend) {
45324       if (CC == X86::COND_A && !Comparison.isMaxValue()) {
45325         Comparison = IncComparison;
45326         CC = X86::COND_AE;
45327       } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
45328         Comparison = IncComparison;
45329         CC = X86::COND_L;
45330       }
45331     }
45332     APInt DecComparison = Comparison - 1;
45333     if (DecComparison == NegAddend) {
45334       if (CC == X86::COND_AE && !Comparison.isMinValue()) {
45335         Comparison = DecComparison;
45336         CC = X86::COND_A;
45337       } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
45338         Comparison = DecComparison;
45339         CC = X86::COND_LE;
45340       }
45341     }
45342   }
45343 
45344   // If the addend is the negation of the comparison value, then we can do
45345   // a full comparison by emitting the atomic arithmetic as a locked sub.
45346   if (Comparison == NegAddend) {
45347     // The CC is fine, but we need to rewrite the LHS of the comparison as an
45348     // atomic sub.
45349     auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
45350     auto AtomicSub = DAG.getAtomic(
45351         ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
45352         /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
45353         /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
45354         AN->getMemOperand());
45355     auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
45356     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45357     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45358     return LockOp;
45359   }
45360 
45361   // We can handle comparisons with zero in a number of cases by manipulating
45362   // the CC used.
45363   if (!Comparison.isZero())
45364     return SDValue();
45365 
45366   if (CC == X86::COND_S && Addend == 1)
45367     CC = X86::COND_LE;
45368   else if (CC == X86::COND_NS && Addend == 1)
45369     CC = X86::COND_G;
45370   else if (CC == X86::COND_G && Addend == -1)
45371     CC = X86::COND_GE;
45372   else if (CC == X86::COND_LE && Addend == -1)
45373     CC = X86::COND_L;
45374   else
45375     return SDValue();
45376 
45377   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
45378   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45379   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45380   return LockOp;
45381 }
45382 
45383 // Check whether a boolean test is testing a boolean value generated by
45384 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
45385 // code.
45386 //
45387 // Simplify the following patterns:
45388 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
45389 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
45390 // to (Op EFLAGS Cond)
45391 //
45392 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
45393 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
45394 // to (Op EFLAGS !Cond)
45395 //
45396 // where Op could be BRCOND or CMOV.
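// Illustrative example (not in the original comments): a node such as
//   (brcond (CMP (SETCC COND_L EFLAGS) 0) NEQ)
// simplifies to (brcond EFLAGS COND_L), avoiding materializing the i8 setcc
// result only to compare it against a constant again.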
45397 //
45398 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
45399   // This combine only operates on CMP-like nodes.
45400   if (!(Cmp.getOpcode() == X86ISD::CMP ||
45401         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45402     return SDValue();
45403 
45404   // Quit if not used as a boolean value.
45405   if (CC != X86::COND_E && CC != X86::COND_NE)
45406     return SDValue();
45407 
45408   // Check CMP operands. One of them should be 0 or 1 and the other should be
45409   // an SetCC or extended from it.
45410   SDValue Op1 = Cmp.getOperand(0);
45411   SDValue Op2 = Cmp.getOperand(1);
45412 
45413   SDValue SetCC;
45414   const ConstantSDNode* C = nullptr;
45415   bool needOppositeCond = (CC == X86::COND_E);
45416   bool checkAgainstTrue = false; // Is it a comparison against 1?
45417 
45418   if ((C = dyn_cast<ConstantSDNode>(Op1)))
45419     SetCC = Op2;
45420   else if ((C = dyn_cast<ConstantSDNode>(Op2)))
45421     SetCC = Op1;
45422   else // Quit if neither operand is a constant.
45423     return SDValue();
45424 
45425   if (C->getZExtValue() == 1) {
45426     needOppositeCond = !needOppositeCond;
45427     checkAgainstTrue = true;
45428   } else if (C->getZExtValue() != 0)
45429     // Quit if the constant is neither 0 nor 1.
45430     return SDValue();
45431 
45432   bool truncatedToBoolWithAnd = false;
45433   // Skip (zext $x), (trunc $x), or (and $x, 1) node.
45434   while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
45435          SetCC.getOpcode() == ISD::TRUNCATE ||
45436          SetCC.getOpcode() == ISD::AND) {
45437     if (SetCC.getOpcode() == ISD::AND) {
45438       int OpIdx = -1;
45439       if (isOneConstant(SetCC.getOperand(0)))
45440         OpIdx = 1;
45441       if (isOneConstant(SetCC.getOperand(1)))
45442         OpIdx = 0;
45443       if (OpIdx < 0)
45444         break;
45445       SetCC = SetCC.getOperand(OpIdx);
45446       truncatedToBoolWithAnd = true;
45447     } else
45448       SetCC = SetCC.getOperand(0);
45449   }
45450 
45451   switch (SetCC.getOpcode()) {
45452   case X86ISD::SETCC_CARRY:
45453     // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
45454     // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
45455     // i.e. it's a comparison against true but the result of SETCC_CARRY is not
45456     // truncated to i1 using 'and'.
45457     if (checkAgainstTrue && !truncatedToBoolWithAnd)
45458       break;
45459     assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
45460            "Invalid use of SETCC_CARRY!");
45461     [[fallthrough]];
45462   case X86ISD::SETCC:
45463     // Set the condition code or opposite one if necessary.
45464     CC = X86::CondCode(SetCC.getConstantOperandVal(0));
45465     if (needOppositeCond)
45466       CC = X86::GetOppositeBranchCondition(CC);
45467     return SetCC.getOperand(1);
45468   case X86ISD::CMOV: {
45469     // Check whether the false/true values are canonical, i.e. 0 or 1.
45470     ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
45471     ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
45472     // Quit if true value is not a constant.
45473     if (!TVal)
45474       return SDValue();
45475     // Quit if false value is not a constant.
45476     if (!FVal) {
45477       SDValue Op = SetCC.getOperand(0);
45478       // Skip 'zext' or 'trunc' node.
45479       if (Op.getOpcode() == ISD::ZERO_EXTEND ||
45480           Op.getOpcode() == ISD::TRUNCATE)
45481         Op = Op.getOperand(0);
45482       // A special case for rdrand/rdseed, where 0 is set if false cond is
45483       // found.
45484       if ((Op.getOpcode() != X86ISD::RDRAND &&
45485            Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
45486         return SDValue();
45487     }
45488     // Quit if false value is not the constant 0 or 1.
45489     bool FValIsFalse = true;
45490     if (FVal && FVal->getZExtValue() != 0) {
45491       if (FVal->getZExtValue() != 1)
45492         return SDValue();
45493       // If FVal is 1, opposite cond is needed.
45494       needOppositeCond = !needOppositeCond;
45495       FValIsFalse = false;
45496     }
45497     // Quit if TVal is not the constant opposite of FVal.
45498     if (FValIsFalse && TVal->getZExtValue() != 1)
45499       return SDValue();
45500     if (!FValIsFalse && TVal->getZExtValue() != 0)
45501       return SDValue();
45502     CC = X86::CondCode(SetCC.getConstantOperandVal(2));
45503     if (needOppositeCond)
45504       CC = X86::GetOppositeBranchCondition(CC);
45505     return SetCC.getOperand(3);
45506   }
45507   }
45508 
45509   return SDValue();
45510 }
45511 
45512 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
45513 /// Match:
45514 ///   (X86or (X86setcc) (X86setcc))
45515 ///   (X86cmp (and (X86setcc) (X86setcc)), 0)
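/// Illustrative example (not in the original comments): for
///   (X86or (X86setcc COND_E, F), (X86setcc COND_B, F))
/// this returns true with CC0 = COND_E, CC1 = COND_B, Flags = F, isAnd = false.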
45516 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
45517                                            X86::CondCode &CC1, SDValue &Flags,
45518                                            bool &isAnd) {
45519   if (Cond->getOpcode() == X86ISD::CMP) {
45520     if (!isNullConstant(Cond->getOperand(1)))
45521       return false;
45522 
45523     Cond = Cond->getOperand(0);
45524   }
45525 
45526   isAnd = false;
45527 
45528   SDValue SetCC0, SetCC1;
45529   switch (Cond->getOpcode()) {
45530   default: return false;
45531   case ISD::AND:
45532   case X86ISD::AND:
45533     isAnd = true;
45534     [[fallthrough]];
45535   case ISD::OR:
45536   case X86ISD::OR:
45537     SetCC0 = Cond->getOperand(0);
45538     SetCC1 = Cond->getOperand(1);
45539     break;
45540   };
45541 
45542   // Make sure we have SETCC nodes, using the same flags value.
45543   if (SetCC0.getOpcode() != X86ISD::SETCC ||
45544       SetCC1.getOpcode() != X86ISD::SETCC ||
45545       SetCC0->getOperand(1) != SetCC1->getOperand(1))
45546     return false;
45547 
45548   CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
45549   CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
45550   Flags = SetCC0->getOperand(1);
45551   return true;
45552 }
45553 
45554 // When legalizing carry, we create carries via add X, -1
45555 // If that comes from an actual carry, via setcc, we use the
45556 // carry directly.
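// Illustrative example (not in the original comments): if EFLAGS comes from
//   (X86add (zext (X86setcc COND_B, F)), -1)
// the add produces a carry exactly when the setb result was 1, so a COND_B
// user of these flags can consume F directly.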
45557 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
45558   if (EFLAGS.getOpcode() == X86ISD::ADD) {
45559     if (isAllOnesConstant(EFLAGS.getOperand(1))) {
45560       bool FoundAndLSB = false;
45561       SDValue Carry = EFLAGS.getOperand(0);
45562       while (Carry.getOpcode() == ISD::TRUNCATE ||
45563              Carry.getOpcode() == ISD::ZERO_EXTEND ||
45564              (Carry.getOpcode() == ISD::AND &&
45565               isOneConstant(Carry.getOperand(1)))) {
45566         FoundAndLSB |= Carry.getOpcode() == ISD::AND;
45567         Carry = Carry.getOperand(0);
45568       }
45569       if (Carry.getOpcode() == X86ISD::SETCC ||
45570           Carry.getOpcode() == X86ISD::SETCC_CARRY) {
45571         // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
45572         uint64_t CarryCC = Carry.getConstantOperandVal(0);
45573         SDValue CarryOp1 = Carry.getOperand(1);
45574         if (CarryCC == X86::COND_B)
45575           return CarryOp1;
45576         if (CarryCC == X86::COND_A) {
45577           // Try to convert COND_A into COND_B in an attempt to facilitate
45578           // materializing "setb reg".
45579           //
45580           // Do not flip "e > c", where "c" is a constant, because Cmp
45581           // instruction cannot take an immediate as its first operand.
45582           //
45583           if (CarryOp1.getOpcode() == X86ISD::SUB &&
45584               CarryOp1.getNode()->hasOneUse() &&
45585               CarryOp1.getValueType().isInteger() &&
45586               !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
45587             SDValue SubCommute =
45588                 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
45589                             CarryOp1.getOperand(1), CarryOp1.getOperand(0));
45590             return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
45591           }
45592         }
45593         // If this is a check of the z flag of an add with 1, switch to the
45594         // C flag.
45595         if (CarryCC == X86::COND_E &&
45596             CarryOp1.getOpcode() == X86ISD::ADD &&
45597             isOneConstant(CarryOp1.getOperand(1)))
45598           return CarryOp1;
45599       } else if (FoundAndLSB) {
45600         SDLoc DL(Carry);
45601         SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
45602         if (Carry.getOpcode() == ISD::SRL) {
45603           BitNo = Carry.getOperand(1);
45604           Carry = Carry.getOperand(0);
45605         }
45606         return getBT(Carry, BitNo, DL, DAG);
45607       }
45608     }
45609   }
45610 
45611   return SDValue();
45612 }
45613 
45614 /// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
45615 /// to avoid the inversion.
45616 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
45617                               SelectionDAG &DAG,
45618                               const X86Subtarget &Subtarget) {
45619   // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
45620   if (EFLAGS.getOpcode() != X86ISD::PTEST &&
45621       EFLAGS.getOpcode() != X86ISD::TESTP)
45622     return SDValue();
45623 
45624   // PTEST/TESTP sets EFLAGS as:
45625   // TESTZ: ZF = (Op0 & Op1) == 0
45626   // TESTC: CF = (~Op0 & Op1) == 0
45627   // TESTNZC: ZF == 0 && CF == 0
45628   MVT VT = EFLAGS.getSimpleValueType();
45629   SDValue Op0 = EFLAGS.getOperand(0);
45630   SDValue Op1 = EFLAGS.getOperand(1);
45631   MVT OpVT = Op0.getSimpleValueType();
45632 
45633   // TEST*(~X,Y) == TEST*(X,Y)
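    // Illustrative example (not in the original comments): PTEST(~X, Y) with
    // COND_E tests ((~X) & Y) == 0, which is exactly the CF (testc) condition
    // of PTEST(X, Y), so the NOT can be dropped by switching to COND_B.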
45634   if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
45635     X86::CondCode InvCC;
45636     switch (CC) {
45637     case X86::COND_B:
45638       // testc -> testz.
45639       InvCC = X86::COND_E;
45640       break;
45641     case X86::COND_AE:
45642       // !testc -> !testz.
45643       InvCC = X86::COND_NE;
45644       break;
45645     case X86::COND_E:
45646       // testz -> testc.
45647       InvCC = X86::COND_B;
45648       break;
45649     case X86::COND_NE:
45650       // !testz -> !testc.
45651       InvCC = X86::COND_AE;
45652       break;
45653     case X86::COND_A:
45654     case X86::COND_BE:
45655       // testnzc -> testnzc (no change).
45656       InvCC = CC;
45657       break;
45658     default:
45659       InvCC = X86::COND_INVALID;
45660       break;
45661     }
45662 
45663     if (InvCC != X86::COND_INVALID) {
45664       CC = InvCC;
45665       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45666                          DAG.getBitcast(OpVT, NotOp0), Op1);
45667     }
45668   }
45669 
45670   if (CC == X86::COND_B || CC == X86::COND_AE) {
45671     // TESTC(X,~X) == TESTC(X,-1)
45672     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45673       if (peekThroughBitcasts(NotOp1) == peekThroughBitcasts(Op0)) {
45674         SDLoc DL(EFLAGS);
45675         return DAG.getNode(
45676             EFLAGS.getOpcode(), DL, VT, DAG.getBitcast(OpVT, NotOp1),
45677             DAG.getBitcast(OpVT,
45678                            DAG.getAllOnesConstant(DL, NotOp1.getValueType())));
45679       }
45680     }
45681   }
45682 
45683   if (CC == X86::COND_E || CC == X86::COND_NE) {
45684     // TESTZ(X,~Y) == TESTC(Y,X)
45685     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45686       CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45687       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45688                          DAG.getBitcast(OpVT, NotOp1), Op0);
45689     }
45690 
45691     if (Op0 == Op1) {
45692       SDValue BC = peekThroughBitcasts(Op0);
45693       EVT BCVT = BC.getValueType();
45694 
45695       // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
45696       if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
45697         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45698                            DAG.getBitcast(OpVT, BC.getOperand(0)),
45699                            DAG.getBitcast(OpVT, BC.getOperand(1)));
45700       }
45701 
45702       // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
45703       if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
45704         CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45705         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45706                            DAG.getBitcast(OpVT, BC.getOperand(0)),
45707                            DAG.getBitcast(OpVT, BC.getOperand(1)));
45708       }
45709 
45710       // If every element is an all-sign value, see if we can use TESTP/MOVMSK
45711       // to more efficiently extract the sign bits and compare that.
45712       // TODO: Handle TESTC with comparison inversion.
45713       // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
45714       // TESTP/MOVMSK combines to make sure it's never worse than PTEST?
45715       if (BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
45716         unsigned EltBits = BCVT.getScalarSizeInBits();
45717         if (DAG.ComputeNumSignBits(BC) == EltBits) {
45718           assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
45719           APInt SignMask = APInt::getSignMask(EltBits);
45720           const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45721           if (SDValue Res =
45722                   TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
45723             // For vXi16 cases we need to use pmovmskb and extract every other
45724             // sign bit.
45725             SDLoc DL(EFLAGS);
45726             if ((EltBits == 32 || EltBits == 64) && Subtarget.hasAVX()) {
45727               MVT FloatSVT = MVT::getFloatingPointVT(EltBits);
45728               MVT FloatVT =
45729                   MVT::getVectorVT(FloatSVT, OpVT.getSizeInBits() / EltBits);
45730               Res = DAG.getBitcast(FloatVT, Res);
45731               return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Res, Res);
45732             } else if (EltBits == 16) {
45733               MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
45734               Res = DAG.getBitcast(MovmskVT, Res);
45735               Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45736               Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
45737                                 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
45738             } else {
45739               Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45740             }
45741             return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
45742                                DAG.getConstant(0, DL, MVT::i32));
45743           }
45744         }
45745       }
45746     }
45747 
45748     // TESTZ(-1,X) == TESTZ(X,X)
45749     if (ISD::isBuildVectorAllOnes(Op0.getNode()))
45750       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
45751 
45752     // TESTZ(X,-1) == TESTZ(X,X)
45753     if (ISD::isBuildVectorAllOnes(Op1.getNode()))
45754       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
45755 
45756     // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
45757     // TODO: Add COND_NE handling?
45758     if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
45759       SDValue Src0 = peekThroughBitcasts(Op0);
45760       SDValue Src1 = peekThroughBitcasts(Op1);
45761       if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
45762         Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
45763                                  peekThroughBitcasts(Src0.getOperand(1)), true);
45764         Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
45765                                  peekThroughBitcasts(Src1.getOperand(1)), true);
45766         if (Src0 && Src1) {
45767           MVT OpVT2 = OpVT.getDoubleNumVectorElementsVT();
45768           return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45769                              DAG.getBitcast(OpVT2, Src0),
45770                              DAG.getBitcast(OpVT2, Src1));
45771         }
45772       }
45773     }
45774   }
45775 
45776   return SDValue();
45777 }
45778 
45779 // Attempt to simplify the MOVMSK input based on the comparison type.
45780 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
45781                                   SelectionDAG &DAG,
45782                                   const X86Subtarget &Subtarget) {
45783   // Handle eq/ne against zero (any_of).
45784   // Handle eq/ne against -1 (all_of).
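  // Illustrative note (not in the original comments): for a v4f32 MOVMSK
  // result M, "any_of" is M != 0 (some sign bit set) and "all_of" is
  // M == 0xF, i.e. a comparison against the low-4-bit mask.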
45785   if (!(CC == X86::COND_E || CC == X86::COND_NE))
45786     return SDValue();
45787   if (EFLAGS.getValueType() != MVT::i32)
45788     return SDValue();
45789   unsigned CmpOpcode = EFLAGS.getOpcode();
45790   if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
45791     return SDValue();
45792   auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
45793   if (!CmpConstant)
45794     return SDValue();
45795   const APInt &CmpVal = CmpConstant->getAPIntValue();
45796 
45797   SDValue CmpOp = EFLAGS.getOperand(0);
45798   unsigned CmpBits = CmpOp.getValueSizeInBits();
45799   assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
45800 
45801   // Peek through any truncate.
45802   if (CmpOp.getOpcode() == ISD::TRUNCATE)
45803     CmpOp = CmpOp.getOperand(0);
45804 
45805   // Bail if we don't find a MOVMSK.
45806   if (CmpOp.getOpcode() != X86ISD::MOVMSK)
45807     return SDValue();
45808 
45809   SDValue Vec = CmpOp.getOperand(0);
45810   MVT VecVT = Vec.getSimpleValueType();
45811   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
45812          "Unexpected MOVMSK operand");
45813   unsigned NumElts = VecVT.getVectorNumElements();
45814   unsigned NumEltBits = VecVT.getScalarSizeInBits();
45815 
45816   bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
45817   bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
45818                  NumElts <= CmpBits && CmpVal.isMask(NumElts);
45819   if (!IsAnyOf && !IsAllOf)
45820     return SDValue();
45821 
45822   // TODO: Check more combining cases.
45823   // We use the number of uses of the CMP operand to decide whether to combine.
45824   // Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))" folds
45825   // below are restricted by this one-use constraint.
45826   bool IsOneUse = CmpOp.getNode()->hasOneUse();
45827 
45828   // See if we can peek through to a vector with a wider element type, if the
45829   // signbits extend down to all the sub-elements as well.
45830   // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
45831   // potential SimplifyDemandedBits/Elts cases.
45832   // If we looked through a truncate that discards bits, we can't do this
45833   // transform.
45834   // FIXME: We could do this transform for truncates that discarded bits by
45835   // inserting an AND mask between the new MOVMSK and the CMP.
45836   if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
45837     SDValue BC = peekThroughBitcasts(Vec);
45838     MVT BCVT = BC.getSimpleValueType();
45839     unsigned BCNumElts = BCVT.getVectorNumElements();
45840     unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
45841     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
45842         BCNumEltBits > NumEltBits &&
45843         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
45844       SDLoc DL(EFLAGS);
45845       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
45846       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45847                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
45848                          DAG.getConstant(CmpMask, DL, MVT::i32));
45849     }
45850   }
45851 
45852   // MOVMSK(CONCAT(X,Y)) == 0 ->  MOVMSK(OR(X,Y)).
45853   // MOVMSK(CONCAT(X,Y)) != 0 ->  MOVMSK(OR(X,Y)).
45854   // MOVMSK(CONCAT(X,Y)) == -1 ->  MOVMSK(AND(X,Y)).
45855   // MOVMSK(CONCAT(X,Y)) != -1 ->  MOVMSK(AND(X,Y)).
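  // Illustrative example (not in the original comments): for a 256-bit concat
  // of 128-bit halves X and Y, "any sign bit set in CONCAT(X,Y)" is the same
  // as "any sign bit set in (X | Y)", so a single narrower MOVMSK of the OR
  // suffices; the all_of case uses AND in the same way.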
45856   if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
45857     SmallVector<SDValue> Ops;
45858     if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
45859         Ops.size() == 2) {
45860       SDLoc DL(EFLAGS);
45861       EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
45862       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
45863       SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
45864                               DAG.getBitcast(SubVT, Ops[0]),
45865                               DAG.getBitcast(SubVT, Ops[1]));
45866       V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
45867       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45868                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
45869                          DAG.getConstant(CmpMask, DL, MVT::i32));
45870     }
45871   }
45872 
45873   // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
45874   // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
45875   // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(XOR(X,Y),XOR(X,Y)).
45876   // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(XOR(X,Y),XOR(X,Y)).
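  // Illustrative example (not in the original comments): an elementwise
  // "X == Y" all_of test (PCMPEQ + MOVMSK compared against the full mask)
  // becomes PTEST(X ^ Y, X ^ Y), whose ZF is set iff X ^ Y is all zero,
  // i.e. iff every element of X equals the corresponding element of Y.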
45877   if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
45878     MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
45879     SDValue BC = peekThroughBitcasts(Vec);
45880     // Ensure MOVMSK was testing every signbit of BC.
45881     if (BC.getValueType().getVectorNumElements() <= NumElts) {
45882       if (BC.getOpcode() == X86ISD::PCMPEQ) {
45883         SDValue V = DAG.getNode(ISD::XOR, SDLoc(BC), BC.getValueType(),
45884                                 BC.getOperand(0), BC.getOperand(1));
45885         V = DAG.getBitcast(TestVT, V);
45886         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45887       }
45888       // Check for 256-bit split vector cases.
45889       if (BC.getOpcode() == ISD::AND &&
45890           BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
45891           BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
45892         SDValue LHS = BC.getOperand(0);
45893         SDValue RHS = BC.getOperand(1);
45894         LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), LHS.getValueType(),
45895                           LHS.getOperand(0), LHS.getOperand(1));
45896         RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), RHS.getValueType(),
45897                           RHS.getOperand(0), RHS.getOperand(1));
45898         LHS = DAG.getBitcast(TestVT, LHS);
45899         RHS = DAG.getBitcast(TestVT, RHS);
45900         SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
45901         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45902       }
45903     }
45904   }
45905 
45906   // See if we can avoid a PACKSS by calling MOVMSK on the sources.
45907   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
45908   // sign bits prior to the comparison with zero unless we know that
45909   // the vXi16 splats the sign bit down to the lower i8 half.
45910   // TODO: Handle all_of patterns.
45911   if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
45912     SDValue VecOp0 = Vec.getOperand(0);
45913     SDValue VecOp1 = Vec.getOperand(1);
45914     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
45915     bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
45916     // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
45917     if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
45918       SDLoc DL(EFLAGS);
45919       SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
45920       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
45921       Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
45922       if (!SignExt0) {
45923         Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
45924                              DAG.getConstant(0xAAAA, DL, MVT::i16));
45925       }
45926       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
45927                          DAG.getConstant(0, DL, MVT::i16));
45928     }
45929     // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
45930     // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
45931     if (CmpBits >= 16 && Subtarget.hasInt256() &&
45932         (IsAnyOf || (SignExt0 && SignExt1))) {
45933       if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
45934         SDLoc DL(EFLAGS);
45935         SDValue Result = peekThroughBitcasts(Src);
45936         if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
45937             Result.getValueType().getVectorNumElements() <= NumElts) {
45938           SDValue V = DAG.getNode(ISD::XOR, DL, Result.getValueType(),
45939                                   Result.getOperand(0), Result.getOperand(1));
45940           V = DAG.getBitcast(MVT::v4i64, V);
45941           return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45942         }
45943         Result = DAG.getBitcast(MVT::v32i8, Result);
45944         Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
45945         unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
45946         if (!SignExt0 || !SignExt1) {
45947           assert(IsAnyOf &&
45948                  "Only perform v16i16 signmasks for any_of patterns");
45949           Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
45950                                DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
45951         }
45952         return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
45953                            DAG.getConstant(CmpMask, DL, MVT::i32));
45954       }
45955     }
45956   }
45957 
45958   // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
45959   // Since we peek through a bitcast, we need to be careful if the base vector
45960   // type has smaller elements than the MOVMSK type.  In that case, even if
45961   // all the elements are demanded by the shuffle mask, only the "high"
45962   // elements which have highbits that align with highbits in the MOVMSK vec
45963   // elements are actually demanded. A simplification of spurious operations
45964   // on the "low" elements takes place during other simplifications.
45965   //
45966   // For example:
45967   // MOVMSK64(BITCAST(SHUF32 X, (1,0,3,2))): even though all the elements are
45968   // demanded, the result can change because we are swapping the halves around.
45969   //
45970   // To address this, we check that we can scale the shuffle mask to MOVMSK
45971   // element width (this will ensure "high" elements match). It's slightly overly
45972   // conservative, but fine for an edge case fold.
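  // Illustrative example (not in the original comments): when the shuffle and
  // the MOVMSK agree on element width, MOVMSK(SHUF32(X, <1,0,3,2>)) gathers
  // the same four sign bits as MOVMSK(X), just permuted, so comparing against
  // 0 or the all-ones mask is unaffected and the shuffle can be dropped.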
45973   SmallVector<int, 32> ShuffleMask, ScaledMaskUnused;
45974   SmallVector<SDValue, 2> ShuffleInputs;
45975   if (NumElts <= CmpBits &&
45976       getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
45977                              ShuffleMask, DAG) &&
45978       ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
45979       ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits() &&
45980       scaleShuffleElements(ShuffleMask, NumElts, ScaledMaskUnused)) {
45981     unsigned NumShuffleElts = ShuffleMask.size();
45982     APInt DemandedElts = APInt::getZero(NumShuffleElts);
45983     for (int M : ShuffleMask) {
45984       assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
45985       DemandedElts.setBit(M);
45986     }
45987     if (DemandedElts.isAllOnes()) {
45988       SDLoc DL(EFLAGS);
45989       SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
45990       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
45991       Result =
45992           DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
45993       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
45994                          EFLAGS.getOperand(1));
45995     }
45996   }
45997 
45998   // MOVMSKPS(V) !=/== 0 -> TESTPS(V,V)
45999   // MOVMSKPD(V) !=/== 0 -> TESTPD(V,V)
46000   // MOVMSKPS(V) !=/== -1 -> TESTPS(V,V)
46001   // MOVMSKPD(V) !=/== -1 -> TESTPD(V,V)
46002   // iff every element is referenced.
46003   if (NumElts <= CmpBits && Subtarget.hasAVX() &&
46004       !Subtarget.preferMovmskOverVTest() && IsOneUse &&
46005       (NumEltBits == 32 || NumEltBits == 64)) {
46006     SDLoc DL(EFLAGS);
46007     MVT FloatSVT = MVT::getFloatingPointVT(NumEltBits);
46008     MVT FloatVT = MVT::getVectorVT(FloatSVT, NumElts);
46009     MVT IntVT = FloatVT.changeVectorElementTypeToInteger();
46010     SDValue LHS = Vec;
46011     SDValue RHS = IsAnyOf ? Vec : DAG.getAllOnesConstant(DL, IntVT);
46012     CC = IsAnyOf ? CC : (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46013     return DAG.getNode(X86ISD::TESTP, DL, MVT::i32,
46014                        DAG.getBitcast(FloatVT, LHS),
46015                        DAG.getBitcast(FloatVT, RHS));
46016   }
46017 
46018   return SDValue();
46019 }
46020 
46021 /// Optimize an EFLAGS definition used according to the condition code \p CC
46022 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
46023 /// uses of chain values.
46024 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
46025                                   SelectionDAG &DAG,
46026                                   const X86Subtarget &Subtarget) {
46027   if (CC == X86::COND_B)
46028     if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
46029       return Flags;
46030 
46031   if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
46032     return R;
46033 
46034   if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
46035     return R;
46036 
46037   if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
46038     return R;
46039 
46040   return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
46041 }
46042 
46043 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
46044 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
46045                            TargetLowering::DAGCombinerInfo &DCI,
46046                            const X86Subtarget &Subtarget) {
46047   SDLoc DL(N);
46048 
46049   SDValue FalseOp = N->getOperand(0);
46050   SDValue TrueOp = N->getOperand(1);
46051   X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
46052   SDValue Cond = N->getOperand(3);
46053 
46054   // cmov X, X, ?, ? --> X
46055   if (TrueOp == FalseOp)
46056     return TrueOp;
46057 
46058   // Try to simplify the EFLAGS and condition code operands.
46059   // We can't always do this as FCMOV only supports a subset of the X86
46059   // condition codes.
46060   if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
46061     if (!(FalseOp.getValueType() == MVT::f80 ||
46062           (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
46063           (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
46064         !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
46065       SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
46066                        Flags};
46067       return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46068     }
46069   }
46070 
46071   // If this is a select between two integer constants, try to do some
46072   // optimizations.  Note that the operands are ordered the opposite of SELECT
46073   // operands.
46074   if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
46075     if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
46076       // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
46077       // larger than FalseC (the false value).
46078       if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
46079         CC = X86::GetOppositeBranchCondition(CC);
46080         std::swap(TrueC, FalseC);
46081         std::swap(TrueOp, FalseOp);
46082       }
46083 
46084       // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
46085       // This is efficient for any integer data type (including i8/i16) and
46086       // shift amount.
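      // Illustrative example (not in the original comments): "Cond ? 32 : 0"
      // becomes (zext(setcc(Cond)) << 5), i.e. a setcc, a zero extend and a
      // shift rather than a cmov with a materialized constant.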
46087       if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
46088         Cond = getSETCC(CC, Cond, DL, DAG);
46089 
46090         // Zero extend the condition if needed.
46091         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
46092 
46093         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
46094         Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
46095                            DAG.getConstant(ShAmt, DL, MVT::i8));
46096         return Cond;
46097       }
46098 
46099       // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
46100       // for any integer data type, including i8/i16.
46101       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
46102         Cond = getSETCC(CC, Cond, DL, DAG);
46103 
46104         // Zero extend the condition if needed.
46105         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
46106                            FalseC->getValueType(0), Cond);
46107         Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46108                            SDValue(FalseC, 0));
46109         return Cond;
46110       }
46111 
46112       // Optimize cases that will turn into an LEA instruction.  This requires
46113       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
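      // Illustrative example (not in the original comments): "Cond ? 13 : 10"
      // has Diff = 3, so it becomes 10 + 3 * zext(setcc(Cond)), where the
      // scale-by-3 and the add can fold into LEA-style address arithmetic.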
46114       if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
46115         APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
46116         assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
46117                "Implicit constant truncation");
46118 
46119         bool isFastMultiplier = false;
46120         if (Diff.ult(10)) {
46121           switch (Diff.getZExtValue()) {
46122           default: break;
46123           case 1:  // result = add base, cond
46124           case 2:  // result = lea base(    , cond*2)
46125           case 3:  // result = lea base(cond, cond*2)
46126           case 4:  // result = lea base(    , cond*4)
46127           case 5:  // result = lea base(cond, cond*4)
46128           case 8:  // result = lea base(    , cond*8)
46129           case 9:  // result = lea base(cond, cond*8)
46130             isFastMultiplier = true;
46131             break;
46132           }
46133         }
46134 
46135         if (isFastMultiplier) {
46136           Cond = getSETCC(CC, Cond, DL, DAG);
46137           // Zero extend the condition if needed.
46138           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
46139                              Cond);
46140           // Scale the condition by the difference.
46141           if (Diff != 1)
46142             Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
46143                                DAG.getConstant(Diff, DL, Cond.getValueType()));
46144 
46145           // Add the base if non-zero.
46146           if (FalseC->getAPIntValue() != 0)
46147             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46148                                SDValue(FalseC, 0));
46149           return Cond;
46150         }
46151       }
46152     }
46153   }
46154 
46155   // Handle these cases:
46156   //   (select (x != c), e, c) -> (select (x != c), e, x),
46157   //   (select (x == c), c, e) -> (select (x == c), x, e)
46158   // where c is an integer constant, and the "select" is the combination
46159   // of CMOV and CMP.
46160   //
46161   // The rationale for this change is that the conditional-move from a constant
46162   // needs two instructions, however, conditional-move from a register needs
46163   // only one instruction.
46164   //
46165   // CAVEAT: By replacing a constant with a symbolic value, it may obscure
46166   //  some instruction-combining opportunities. This opt needs to be
46167   //  postponed as late as possible.
46168   //
46169   if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
46170     // the DCI.xxxx conditions are provided to postpone the optimization as
46171     // late as possible.
46172 
46173     ConstantSDNode *CmpAgainst = nullptr;
46174     if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
46175         (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
46176         !isa<ConstantSDNode>(Cond.getOperand(0))) {
46177 
46178       if (CC == X86::COND_NE &&
46179           CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
46180         CC = X86::GetOppositeBranchCondition(CC);
46181         std::swap(TrueOp, FalseOp);
46182       }
46183 
46184       if (CC == X86::COND_E &&
46185           CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
46186         SDValue Ops[] = {FalseOp, Cond.getOperand(0),
46187                          DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
46188         return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46189       }
46190     }
46191   }
46192 
46193   // Transform:
46194   //
46195   //   (cmov 1 T (uge T 2))
46196   //
46197   // to:
46198   //
46199   //   (adc T 0 (sub T 1))
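  // Illustrative check (not in the original comments): the cmov computes
  // "T >= 2 ? T : 1". After "sub T, 1", CF is set only for T == 0, so
  // "adc T, 0" yields 1 for T == 0, 1 for T == 1 and T for T >= 2, matching
  // the cmov for every value of T.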
46200   if (CC == X86::COND_AE && isOneConstant(FalseOp) &&
46201       Cond.getOpcode() == X86ISD::SUB && Cond->hasOneUse()) {
46202     SDValue Cond0 = Cond.getOperand(0);
46203     if (Cond0.getOpcode() == ISD::TRUNCATE)
46204       Cond0 = Cond0.getOperand(0);
46205     auto *Sub1C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
46206     if (Cond0 == TrueOp && Sub1C && Sub1C->getZExtValue() == 2) {
46207       EVT CondVT = Cond->getValueType(0);
46208       EVT OuterVT = N->getValueType(0);
46209       // Subtract 1 and generate a carry.
46210       SDValue NewSub =
46211           DAG.getNode(X86ISD::SUB, DL, Cond->getVTList(), Cond.getOperand(0),
46212                       DAG.getConstant(1, DL, CondVT));
46213       SDValue EFLAGS(NewSub.getNode(), 1);
46214       return DAG.getNode(X86ISD::ADC, DL, DAG.getVTList(OuterVT, MVT::i32),
46215                          TrueOp, DAG.getConstant(0, DL, OuterVT), EFLAGS);
46216     }
46217   }
46218 
46219   // Fold and/or of setcc's to double CMOV:
46220   //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
46221   //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
46222   //
46223   // This combine lets us generate:
46224   //   cmovcc1 (jcc1 if we don't have CMOV)
46225   //   cmovcc2 (same)
46226   // instead of:
46227   //   setcc1
46228   //   setcc2
46229   //   and/or
46230   //   cmovne (jne if we don't have CMOV)
46231   // When we can't use the CMOV instruction, it might increase branch
46232   // mispredicts.
46233   // When we can use CMOV, or when there is no mispredict, this improves
46234   // throughput and reduces register pressure.
46235   //
46236   if (CC == X86::COND_NE) {
46237     SDValue Flags;
46238     X86::CondCode CC0, CC1;
46239     bool isAndSetCC;
46240     if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
46241       if (isAndSetCC) {
46242         std::swap(FalseOp, TrueOp);
46243         CC0 = X86::GetOppositeBranchCondition(CC0);
46244         CC1 = X86::GetOppositeBranchCondition(CC1);
46245       }
46246 
46247       SDValue LOps[] = {FalseOp, TrueOp,
46248                         DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
46249       SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
46250       SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
46251                        Flags};
46252       SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46253       return CMOV;
46254     }
46255   }
46256 
46257   // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
46258   //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
46259   // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
46260   //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
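  // Illustrative example (not in the original comments): "X ? cttz(X) + 1 : 33"
  // matches with C1 = 33 and C2 = 1 and becomes
  // "(cmov 32, cttz(X), (X != 0)) + 1"; if cttz(0) is later known to be the
  // bit width (e.g. via tzcnt), the inner cmov may simplify away entirely.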
46261   if ((CC == X86::COND_NE || CC == X86::COND_E) &&
46262       Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
46263     SDValue Add = TrueOp;
46264     SDValue Const = FalseOp;
46265     // Canonicalize the condition code for easier matching and output.
46266     if (CC == X86::COND_E)
46267       std::swap(Add, Const);
46268 
46269     // We might have replaced the constant in the cmov with the LHS of the
46270     // compare. If so change it to the RHS of the compare.
46271     if (Const == Cond.getOperand(0))
46272       Const = Cond.getOperand(1);
46273 
46274     // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
46275     if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
46276         Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
46277         (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
46278          Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
46279         Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
46280       EVT VT = N->getValueType(0);
46281       // This should constant fold.
46282       SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
46283       SDValue CMov =
46284           DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
46285                       DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
46286       return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
46287     }
46288   }
46289 
46290   return SDValue();
46291 }
46292 
46293 /// Different mul shrinking modes.
46294 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
46295 
46296 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
46297   EVT VT = N->getOperand(0).getValueType();
46298   if (VT.getScalarSizeInBits() != 32)
46299     return false;
46300 
46301   assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
46302   unsigned SignBits[2] = {1, 1};
46303   bool IsPositive[2] = {false, false};
46304   for (unsigned i = 0; i < 2; i++) {
46305     SDValue Opd = N->getOperand(i);
46306 
46307     SignBits[i] = DAG.ComputeNumSignBits(Opd);
46308     IsPositive[i] = DAG.SignBitIsZero(Opd);
46309   }
46310 
46311   bool AllPositive = IsPositive[0] && IsPositive[1];
46312   unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
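  // For reference (not in the original comments): a value sign-extended from
  // i8 to i32 has at least 25 sign bits, while one zero-extended from i8 has
  // at least 24 leading zero bits (hence at least 24 sign bits) and a
  // known-zero sign bit; these are the quantities the thresholds below test.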
46313   // When ranges are from -128 ~ 127, use MULS8 mode.
46314   if (MinSignBits >= 25)
46315     Mode = ShrinkMode::MULS8;
46316   // When ranges are from 0 ~ 255, use MULU8 mode.
46317   else if (AllPositive && MinSignBits >= 24)
46318     Mode = ShrinkMode::MULU8;
46319   // When ranges are from -32768 ~ 32767, use MULS16 mode.
46320   else if (MinSignBits >= 17)
46321     Mode = ShrinkMode::MULS16;
46322   // When ranges are from 0 ~ 65535, use MULU16 mode.
46323   else if (AllPositive && MinSignBits >= 16)
46324     Mode = ShrinkMode::MULU16;
46325   else
46326     return false;
46327   return true;
46328 }
46329 
46330 /// When the operands of vector mul are extended from smaller size values,
46331 /// like i8 and i16, the type of mul may be shrunk to generate more
46332 /// efficient code. Two typical patterns are handled:
46333 /// Pattern1:
46334 ///     %2 = sext/zext <N x i8> %1 to <N x i32>
46335 ///     %4 = sext/zext <N x i8> %3 to <N x i32>
46336 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46337 ///     %5 = mul <N x i32> %2, %4
46338 ///
46339 /// Pattern2:
46340 ///     %2 = zext/sext <N x i16> %1 to <N x i32>
46341 ///     %4 = zext/sext <N x i16> %3 to <N x i32>
46342 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46343 ///     %5 = mul <N x i32> %2, %4
46344 ///
46345 /// There are four mul shrinking modes:
46346 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
46347 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
46348 /// generate pmullw+sext32 for it (MULS8 mode).
46349 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
46350 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
46351 /// generate pmullw+zext32 for it (MULU8 mode).
46352 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
46353 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
46354 /// generate pmullw+pmulhw for it (MULS16 mode).
46355 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
46356 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
46357 /// generate pmullw+pmulhuw for it (MULU16 mode).
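/// Illustrative walk-through (not in the original comments), for MULU16 mode
/// on v8i32 operands that are zero-extended from v8i16: truncate both back to
/// v8i16, compute pmullw (low halves) and pmulhuw (high halves), then
/// interleave the two results with punpcklwd/punpckhwd-style shuffles to
/// rebuild the v8i32 product.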
46358 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
46359                                const X86Subtarget &Subtarget) {
46360   // Check for legality
46361   // pmullw/pmulhw on XMM registers are not available before SSE2.
46362   if (!Subtarget.hasSSE2())
46363     return SDValue();
46364 
46365   // Check for profitability
46366   // pmulld is supported since SSE41. It is better to use pmulld
46367   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
46368   // the expansion.
46369   bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
46370   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
46371     return SDValue();
46372 
46373   ShrinkMode Mode;
46374   if (!canReduceVMulWidth(N, DAG, Mode))
46375     return SDValue();
46376 
46377   SDLoc DL(N);
46378   SDValue N0 = N->getOperand(0);
46379   SDValue N1 = N->getOperand(1);
46380   EVT VT = N->getOperand(0).getValueType();
46381   unsigned NumElts = VT.getVectorNumElements();
46382   if ((NumElts % 2) != 0)
46383     return SDValue();
46384 
46385   EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
46386 
46387   // Shrink the operands of mul.
46388   SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
46389   SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
46390 
46391   // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
46392   // lower part is needed.
46393   SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
46394   if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
46395     return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
46396                                                    : ISD::SIGN_EXTEND,
46397                        DL, VT, MulLo);
46398 
46399   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
46400   // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
46401   // the higher part is also needed.
46402   SDValue MulHi =
46403       DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
46404                   ReducedVT, NewN0, NewN1);
46405 
46406   // Repack the low and high halves of the multiply into the wider
46407   // result.
46408   // Generate shuffle functioning as punpcklwd.
46409   SmallVector<int, 16> ShuffleMask(NumElts);
46410   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46411     ShuffleMask[2 * i] = i;
46412     ShuffleMask[2 * i + 1] = i + NumElts;
46413   }
46414   SDValue ResLo =
46415       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46416   ResLo = DAG.getBitcast(ResVT, ResLo);
46417   // Generate shuffle functioning as punpckhwd.
46418   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46419     ShuffleMask[2 * i] = i + NumElts / 2;
46420     ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
46421   }
46422   SDValue ResHi =
46423       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46424   ResHi = DAG.getBitcast(ResVT, ResHi);
46425   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
46426 }
46427 
46428 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
46429                                  EVT VT, const SDLoc &DL) {
46430 
46431   auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
46432     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46433                                  DAG.getConstant(Mult, DL, VT));
46434     Result = DAG.getNode(ISD::SHL, DL, VT, Result,
46435                          DAG.getConstant(Shift, DL, MVT::i8));
46436     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46437                          N->getOperand(0));
46438     return Result;
46439   };
46440 
46441   auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
46442     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46443                                  DAG.getConstant(Mul1, DL, VT));
46444     Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
46445                          DAG.getConstant(Mul2, DL, VT));
46446     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46447                          N->getOperand(0));
46448     return Result;
46449   };
46450 
46451   switch (MulAmt) {
46452   default:
46453     break;
46454   case 11:
46455     // mul x, 11 => add ((shl (mul x, 5), 1), x)
46456     return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
46457   case 21:
46458     // mul x, 21 => add ((shl (mul x, 5), 2), x)
46459     return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
46460   case 41:
46461     // mul x, 41 => add ((shl (mul x, 5), 3), x)
46462     return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
46463   case 22:
46464     // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
46465     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46466                        combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
46467   case 19:
46468     // mul x, 19 => add ((shl (mul x, 9), 1), x)
46469     return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
46470   case 37:
46471     // mul x, 37 => add ((shl (mul x, 9), 2), x)
46472     return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
46473   case 73:
46474     // mul x, 73 => add ((shl (mul x, 9), 3), x)
46475     return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
46476   case 13:
46477     // mul x, 13 => add ((shl (mul x, 3), 2), x)
46478     return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
46479   case 23:
46480     // mul x, 23 => sub ((shl (mul x, 3), 3), x)
46481     return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
46482   case 26:
46483     // mul x, 26 => add ((mul (mul x, 5), 5), x)
46484     return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
46485   case 28:
46486     // mul x, 28 => add ((mul (mul x, 9), 3), x)
46487     return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
46488   case 29:
46489     // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
46490     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46491                        combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
46492   }
46493 
46494   // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
46495   // by a single LEA.
46496   // First check if this is a sum of two powers of 2 because that's easy. Then
46497   // count the trailing zeros to find the low power of 2, which must be 2, 4 or 8.
46498   // TODO: We can do this even without LEA at a cost of two shifts and an add.
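        // e.g. MulAmt = 20 = 16 + 4: countr_zero(20) == 2 and (20 & 19) == 16, so we
        // emit (add (shl x, 4), (shl x, 2)), selectable as one SHL plus one LEA.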
46499   if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
46500     unsigned ScaleShift = llvm::countr_zero(MulAmt);
46501     if (ScaleShift >= 1 && ScaleShift < 4) {
46502       unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
46503       SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46504                                    DAG.getConstant(ShiftAmt, DL, MVT::i8));
46505       SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46506                                    DAG.getConstant(ScaleShift, DL, MVT::i8));
46507       return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
46508     }
46509   }
46510 
46511   return SDValue();
46512 }
46513 
46514 // If the upper 17 bits of one operand's elements are zero and the other
46515 // operand's upper bits are all zero/sign bits, then we can use PMADDWD, which is
46516 // always at least as quick as PMULLD, except on KNL.
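      // (Rationale sketch: PMADDWD computes lo16(x)*lo16(y) + hi16(x)*hi16(y) per
      // i32 lane from signed i16 inputs; if one operand's upper 17 bits are zero,
      // its hi16 term vanishes and its lo16 is non-negative, so the remaining
      // lo*lo product equals the low 32 bits of the i32 multiply.)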
46517 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
46518                                    const X86Subtarget &Subtarget) {
46519   if (!Subtarget.hasSSE2())
46520     return SDValue();
46521 
46522   if (Subtarget.isPMADDWDSlow())
46523     return SDValue();
46524 
46525   EVT VT = N->getValueType(0);
46526 
46527   // Only support vXi32 vectors.
46528   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
46529     return SDValue();
46530 
46531   // Make sure the type is legal or can split/widen to a legal type.
46532   // With AVX512 but without BWI, we would need to split v32i16.
46533   unsigned NumElts = VT.getVectorNumElements();
46534   if (NumElts == 1 || !isPowerOf2_32(NumElts))
46535     return SDValue();
46536 
46537   // With AVX512 but without BWI, we would need to split v32i16.
46538   if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
46539     return SDValue();
46540 
46541   SDValue N0 = N->getOperand(0);
46542   SDValue N1 = N->getOperand(1);
46543 
46544   // If we are zero/sign extending in two steps without SSE4.1, it's better to
46545   // reduce the vmul width instead.
46546   if (!Subtarget.hasSSE41() &&
46547       (((N0.getOpcode() == ISD::ZERO_EXTEND &&
46548          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46549         (N1.getOpcode() == ISD::ZERO_EXTEND &&
46550          N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
46551        ((N0.getOpcode() == ISD::SIGN_EXTEND &&
46552          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46553         (N1.getOpcode() == ISD::SIGN_EXTEND &&
46554          N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
46555     return SDValue();
46556 
46557   // If we are sign extending a wide vector without SSE4.1, it's better to
46558   // reduce the vmul width instead.
46559   if (!Subtarget.hasSSE41() &&
46560       (N0.getOpcode() == ISD::SIGN_EXTEND &&
46561        N0.getOperand(0).getValueSizeInBits() > 128) &&
46562       (N1.getOpcode() == ISD::SIGN_EXTEND &&
46563        N1.getOperand(0).getValueSizeInBits() > 128))
46564     return SDValue();
46565 
46566   // Sign bits must extend down to the lowest i16.
46567   if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
46568       DAG.ComputeMaxSignificantBits(N0) > 16)
46569     return SDValue();
46570 
46571   // At least one of the operands must be zero in the upper 17 bits, or it must
46572   // be possible to safely zero those bits without altering the final result.
46573   auto GetZeroableOp = [&](SDValue Op) {
46574     APInt Mask17 = APInt::getHighBitsSet(32, 17);
46575     if (DAG.MaskedValueIsZero(Op, Mask17))
46576       return Op;
46577     // Mask off upper 16-bits of sign-extended constants.
46578     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
46579       return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
46580                          DAG.getConstant(0xFFFF, SDLoc(N), VT));
46581     if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
46582       SDValue Src = Op.getOperand(0);
46583       // Convert sext(vXi16) to zext(vXi16).
46584       if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
46585         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46586       // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
46587       // which will expand the extension.
46588       if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
46589         EVT ExtVT = VT.changeVectorElementType(MVT::i16);
46590         Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
46591         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46592       }
46593     }
46594     // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
46595     if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
46596         N->isOnlyUserOf(Op.getNode())) {
46597       SDValue Src = Op.getOperand(0);
46598       if (Src.getScalarValueSizeInBits() == 16)
46599         return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
46600     }
46601     // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
46602     if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
46603         N->isOnlyUserOf(Op.getNode())) {
46604       return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
46605                          Op.getOperand(1));
46606     }
46607     return SDValue();
46608   };
46609   SDValue ZeroN0 = GetZeroableOp(N0);
46610   SDValue ZeroN1 = GetZeroableOp(N1);
46611   if (!ZeroN0 && !ZeroN1)
46612     return SDValue();
46613   N0 = ZeroN0 ? ZeroN0 : N0;
46614   N1 = ZeroN1 ? ZeroN1 : N1;
46615 
46616   // Use SplitOpsAndApply to handle AVX splitting.
46617   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46618                            ArrayRef<SDValue> Ops) {
46619     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
46620     MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
46621     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
46622                        DAG.getBitcast(OpVT, Ops[0]),
46623                        DAG.getBitcast(OpVT, Ops[1]));
46624   };
46625   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
46626                           PMADDWDBuilder);
46627 }
46628 
46629 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
46630                                   const X86Subtarget &Subtarget) {
46631   if (!Subtarget.hasSSE2())
46632     return SDValue();
46633 
46634   EVT VT = N->getValueType(0);
46635 
46636   // Only support vXi64 vectors.
46637   if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
46638       VT.getVectorNumElements() < 2 ||
46639       !isPowerOf2_32(VT.getVectorNumElements()))
46640     return SDValue();
46641 
46642   SDValue N0 = N->getOperand(0);
46643   SDValue N1 = N->getOperand(1);
46644 
46645   // PMULDQ returns the 64-bit result of the signed multiplication of the lower
46646   // 32 bits. We can lower with this if the sign bits stretch that far.
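        // e.g. a v2i64 multiply of two (sext v2i32) operands has more than 32 sign
        // bits on each side, so it maps directly onto PMULDQ.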
46647   if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
46648       DAG.ComputeNumSignBits(N1) > 32) {
46649     auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46650                             ArrayRef<SDValue> Ops) {
46651       return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
46652     };
46653     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46654                             PMULDQBuilder, /*CheckBWI*/false);
46655   }
46656 
46657   // If the upper bits are zero we can use a single pmuludq.
46658   APInt Mask = APInt::getHighBitsSet(64, 32);
46659   if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
46660     auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46661                              ArrayRef<SDValue> Ops) {
46662       return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
46663     };
46664     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46665                             PMULUDQBuilder, /*CheckBWI*/false);
46666   }
46667 
46668   return SDValue();
46669 }
46670 
46671 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
46672                           TargetLowering::DAGCombinerInfo &DCI,
46673                           const X86Subtarget &Subtarget) {
46674   EVT VT = N->getValueType(0);
46675 
46676   if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
46677     return V;
46678 
46679   if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
46680     return V;
46681 
46682   if (DCI.isBeforeLegalize() && VT.isVector())
46683     return reduceVMULWidth(N, DAG, Subtarget);
46684 
46685   // Optimize a single multiply by a constant into a pair of cheaper
46686   // instructions, e.g. LEA + SHL or LEA + LEA.
46687   if (!MulConstantOptimization)
46688     return SDValue();
46689 
46690   // An imul is usually smaller than the alternative sequence.
46691   if (DAG.getMachineFunction().getFunction().hasMinSize())
46692     return SDValue();
46693 
46694   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
46695     return SDValue();
46696 
46697   if (VT != MVT::i64 && VT != MVT::i32 &&
46698       (!VT.isVector() || !VT.isSimple() || !VT.isInteger()))
46699     return SDValue();
46700 
46701   ConstantSDNode *CNode = isConstOrConstSplat(
46702       N->getOperand(1), /*AllowUndefs*/ true, /*AllowTrunc*/ false);
46703   const APInt *C = nullptr;
46704   if (!CNode) {
46705     if (VT.isVector())
46706       if (auto *RawC = getTargetConstantFromNode(N->getOperand(1)))
46707         if (auto *SplatC = RawC->getSplatValue())
46708           C = &(SplatC->getUniqueInteger());
46709 
46710     if (!C || C->getBitWidth() != VT.getScalarSizeInBits())
46711       return SDValue();
46712   } else {
46713     C = &(CNode->getAPIntValue());
46714   }
46715 
46716   if (isPowerOf2_64(C->getZExtValue()))
46717     return SDValue();
46718 
46719   int64_t SignMulAmt = C->getSExtValue();
46720   assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
46721   uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
46722 
46723   SDLoc DL(N);
46724   SDValue NewMul = SDValue();
46725   if (VT == MVT::i64 || VT == MVT::i32) {
46726     if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
46727       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46728                            DAG.getConstant(AbsMulAmt, DL, VT));
46729       if (SignMulAmt < 0)
46730         NewMul =
46731             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46732 
46733       return NewMul;
46734     }
46735 
46736     uint64_t MulAmt1 = 0;
46737     uint64_t MulAmt2 = 0;
46738     if ((AbsMulAmt % 9) == 0) {
46739       MulAmt1 = 9;
46740       MulAmt2 = AbsMulAmt / 9;
46741     } else if ((AbsMulAmt % 5) == 0) {
46742       MulAmt1 = 5;
46743       MulAmt2 = AbsMulAmt / 5;
46744     } else if ((AbsMulAmt % 3) == 0) {
46745       MulAmt1 = 3;
46746       MulAmt2 = AbsMulAmt / 3;
46747     }
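          // e.g. AbsMulAmt == 45 factors as 9 * 5 (two LEAs), while AbsMulAmt == 40
          // factors as 5 * 8 (LEA + SHL, usually with the shift issued first below).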
46748 
46749     // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
46750     if (MulAmt2 &&
46751         (isPowerOf2_64(MulAmt2) ||
46752          (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
46753 
46754       if (isPowerOf2_64(MulAmt2) && !(SignMulAmt >= 0 && N->hasOneUse() &&
46755                                       N->use_begin()->getOpcode() == ISD::ADD))
46756         // If the second multiplier is a power of 2, issue it first. We want the
46757         // multiply by 3, 5, or 9 to be folded into the addressing mode unless the
46758         // lone use is an add. Only do this for positive multiply amounts since the
46759         // negate would prevent it from being folded into an addressing mode anyway.
46760         std::swap(MulAmt1, MulAmt2);
46761 
46762       if (isPowerOf2_64(MulAmt1))
46763         NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46764                              DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
46765       else
46766         NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46767                              DAG.getConstant(MulAmt1, DL, VT));
46768 
46769       if (isPowerOf2_64(MulAmt2))
46770         NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
46771                              DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
46772       else
46773         NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
46774                              DAG.getConstant(MulAmt2, DL, VT));
46775 
46776       // Negate the result.
46777       if (SignMulAmt < 0)
46778         NewMul =
46779             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46780     } else if (!Subtarget.slowLEA())
46781       NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
46782   }
46783   if (!NewMul) {
46784     EVT ShiftVT = VT.isVector() ? VT : MVT::i8;
46785     assert(C->getZExtValue() != 0 &&
46786            C->getZExtValue() != maxUIntN(VT.getScalarSizeInBits()) &&
46787            "Both cases that could cause potential overflows should have "
46788            "already been handled.");
46789     if (isPowerOf2_64(AbsMulAmt - 1)) {
46790       // (mul x, 2^N + 1) => (add (shl x, N), x)
46791       NewMul = DAG.getNode(
46792           ISD::ADD, DL, VT, N->getOperand(0),
46793           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46794                       DAG.getConstant(Log2_64(AbsMulAmt - 1), DL, ShiftVT)));
46795       // To negate, subtract the number from zero
46796       if (SignMulAmt < 0)
46797         NewMul =
46798             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46799     } else if (isPowerOf2_64(AbsMulAmt + 1)) {
46800       // (mul x, 2^N - 1) => (sub (shl x, N), x)
46801       NewMul =
46802           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46803                       DAG.getConstant(Log2_64(AbsMulAmt + 1), DL, ShiftVT));
46804       // To negate, reverse the operands of the subtract.
46805       if (SignMulAmt < 0)
46806         NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
46807       else
46808         NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
46809     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2) &&
46810                (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46811       // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
46812       NewMul =
46813           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46814                       DAG.getConstant(Log2_64(AbsMulAmt - 2), DL, ShiftVT));
46815       NewMul = DAG.getNode(
46816           ISD::ADD, DL, VT, NewMul,
46817           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46818     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2) &&
46819                (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46820       // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
46821       NewMul =
46822           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46823                       DAG.getConstant(Log2_64(AbsMulAmt + 2), DL, ShiftVT));
46824       NewMul = DAG.getNode(
46825           ISD::SUB, DL, VT, NewMul,
46826           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46827     } else if (SignMulAmt >= 0 && VT.isVector() &&
46828                Subtarget.fastImmVectorShift()) {
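            // Try to decompose AbsMulAmt as (2^a + 2^b) or (2^a - 2^b), where 2^b is
            // the lowest set bit, so the multiply becomes two shifts plus an add/sub;
            // e.g. 24 = 16 + 8 -> (add (shl x, 4), (shl x, 3)).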
46829       uint64_t AbsMulAmtLowBit = AbsMulAmt & (-AbsMulAmt);
46830       uint64_t ShiftAmt1;
46831       std::optional<unsigned> Opc;
46832       if (isPowerOf2_64(AbsMulAmt - AbsMulAmtLowBit)) {
46833         ShiftAmt1 = AbsMulAmt - AbsMulAmtLowBit;
46834         Opc = ISD::ADD;
46835       } else if (isPowerOf2_64(AbsMulAmt + AbsMulAmtLowBit)) {
46836         ShiftAmt1 = AbsMulAmt + AbsMulAmtLowBit;
46837         Opc = ISD::SUB;
46838       }
46839 
46840       if (Opc) {
46841         SDValue Shift1 =
46842             DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46843                         DAG.getConstant(Log2_64(ShiftAmt1), DL, ShiftVT));
46844         SDValue Shift2 =
46845             DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46846                         DAG.getConstant(Log2_64(AbsMulAmtLowBit), DL, ShiftVT));
46847         NewMul = DAG.getNode(*Opc, DL, VT, Shift1, Shift2);
46848       }
46849     }
46850   }
46851 
46852   return NewMul;
46853 }
46854 
46855 // Try to form a MULHU or MULHS node by looking for
46856 // (srl (mul ext, ext), 16)
46857 // TODO: This is X86 specific because we want to be able to handle wide types
46858 // before type legalization. But we can only do it if the vector will be
46859 // legalized via widening/splitting. Type legalization can't handle promotion
46860 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
46861 // combiner.
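      // e.g. (srl (mul (zext vXi16 X), (zext vXi16 Y)), 16) -> (zext (mulhu X, Y)),
      // and the sext/sra form becomes (sext (mulhs X, Y)) in the same way.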
46862 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
46863                                    const X86Subtarget &Subtarget) {
46864   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
46865            "SRL or SRA node is required here!");
46866   SDLoc DL(N);
46867 
46868   if (!Subtarget.hasSSE2())
46869     return SDValue();
46870 
46871   // The operation feeding into the shift must be a multiply.
46872   SDValue ShiftOperand = N->getOperand(0);
46873   if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
46874     return SDValue();
46875 
46876   // Input type should be at least vXi32.
46877   EVT VT = N->getValueType(0);
46878   if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
46879     return SDValue();
46880 
46881   // Need a shift by 16.
46882   APInt ShiftAmt;
46883   if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
46884       ShiftAmt != 16)
46885     return SDValue();
46886 
46887   SDValue LHS = ShiftOperand.getOperand(0);
46888   SDValue RHS = ShiftOperand.getOperand(1);
46889 
46890   unsigned ExtOpc = LHS.getOpcode();
46891   if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
46892       RHS.getOpcode() != ExtOpc)
46893     return SDValue();
46894 
46895   // Peek through the extends.
46896   LHS = LHS.getOperand(0);
46897   RHS = RHS.getOperand(0);
46898 
46899   // Ensure the input types match.
46900   EVT MulVT = LHS.getValueType();
46901   if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
46902     return SDValue();
46903 
46904   unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
46905   SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
46906 
46907   ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
46908   return DAG.getNode(ExtOpc, DL, VT, Mulh);
46909 }
46910 
46911 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
46912   SDValue N0 = N->getOperand(0);
46913   SDValue N1 = N->getOperand(1);
46914   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
46915   EVT VT = N0.getValueType();
46916 
46917   // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
46918   // since the result of setcc_c is all zero's or all ones.
46919   if (VT.isInteger() && !VT.isVector() &&
46920       N1C && N0.getOpcode() == ISD::AND &&
46921       N0.getOperand(1).getOpcode() == ISD::Constant) {
46922     SDValue N00 = N0.getOperand(0);
46923     APInt Mask = N0.getConstantOperandAPInt(1);
46924     Mask <<= N1C->getAPIntValue();
46925     bool MaskOK = false;
46926     // We can handle cases concerning bit-widening nodes containing setcc_c if
46927     // we carefully interrogate the mask to make sure we are preserving the
46928     // semantics.
46929     // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
46930     // of the underlying setcc_c operation when the setcc_c was zero extended.
46931     // Consider the following example:
46932     //   zext(setcc_c)                 -> i32 0x0000FFFF
46933     //   c1                            -> i32 0x0000FFFF
46934     //   c2                            -> i32 0x00000001
46935     //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
46936     //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
46937     if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
46938       MaskOK = true;
46939     } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
46940                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
46941       MaskOK = true;
46942     } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
46943                 N00.getOpcode() == ISD::ANY_EXTEND) &&
46944                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
46945       MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
46946     }
46947     if (MaskOK && Mask != 0) {
46948       SDLoc DL(N);
46949       return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
46950     }
46951   }
46952 
46953   return SDValue();
46954 }
46955 
46956 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
46957                                            const X86Subtarget &Subtarget) {
46958   SDValue N0 = N->getOperand(0);
46959   SDValue N1 = N->getOperand(1);
46960   EVT VT = N0.getValueType();
46961   unsigned Size = VT.getSizeInBits();
46962 
46963   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
46964     return V;
46965 
46966   // fold (ashr (shl a, [56,48,32,24,16]), SarConst)
46967   // into (shl (sext_inreg a), [56,48,32,24,16] - SarConst) or
46968   // into (sra (sext_inreg a), SarConst - [56,48,32,24,16])
46969   // depending on the sign of (SarConst - [56,48,32,24,16]).
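        // e.g. for i32: (sra (shl x, 24), 25) -> (sra (sext_inreg x, i8), 1), and
        //      (sra (shl x, 24), 22) -> (shl (sext_inreg x, i8), 2).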
46970 
46971   // sexts on X86 are MOVs. The MOVs have the same code size
46972   // as the SHIFTs above (only a SHIFT by 1 has smaller code size).
46973   // However, the MOVs have two advantages over a SHIFT:
46974   // 1. MOVs can write to a register that differs from the source.
46975   // 2. MOVs accept memory operands.
46976 
46977   if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
46978       N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
46979       N0.getOperand(1).getOpcode() != ISD::Constant)
46980     return SDValue();
46981 
46982   SDValue N00 = N0.getOperand(0);
46983   SDValue N01 = N0.getOperand(1);
46984   APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
46985   APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
46986   EVT CVT = N1.getValueType();
46987 
46988   if (SarConst.isNegative())
46989     return SDValue();
46990 
46991   for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
46992     unsigned ShiftSize = SVT.getSizeInBits();
46993     // Skip types without a corresponding sext/zext and ShlConst values that
46994     // are not one of [56,48,32,24,16].
46995     if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
46996       continue;
46997     SDLoc DL(N);
46998     SDValue NN =
46999         DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
47000     SarConst = SarConst - (Size - ShiftSize);
47001     if (SarConst == 0)
47002       return NN;
47003     if (SarConst.isNegative())
47004       return DAG.getNode(ISD::SHL, DL, VT, NN,
47005                          DAG.getConstant(-SarConst, DL, CVT));
47006     return DAG.getNode(ISD::SRA, DL, VT, NN,
47007                        DAG.getConstant(SarConst, DL, CVT));
47008   }
47009   return SDValue();
47010 }
47011 
47012 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
47013                                         TargetLowering::DAGCombinerInfo &DCI,
47014                                         const X86Subtarget &Subtarget) {
47015   SDValue N0 = N->getOperand(0);
47016   SDValue N1 = N->getOperand(1);
47017   EVT VT = N0.getValueType();
47018 
47019   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47020     return V;
47021 
47022   // Only do this on the last DAG combine as it can interfere with other
47023   // combines.
47024   if (!DCI.isAfterLegalizeDAG())
47025     return SDValue();
47026 
47027   // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
47028   // TODO: This is a generic DAG combine that became an x86-only combine to
47029   // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
47030   // and-not ('andn').
47031   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
47032     return SDValue();
47033 
47034   auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
47035   auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
47036   if (!ShiftC || !AndC)
47037     return SDValue();
47038 
47039   // If we can shrink the constant mask below 8-bits or 32-bits, then this
47040   // transform should reduce code size. It may also enable secondary transforms
47041   // from improved known-bits analysis or instruction selection.
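        // e.g. for i32: (srl (and X, 0xFF0), 4) -> (and (srl X, 4), 0xFF) turns a
        // 12-bit mask immediate into an 8-bit one.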
47042   APInt MaskVal = AndC->getAPIntValue();
47043 
47044   // If this can be matched by a zero extend, don't optimize.
47045   if (MaskVal.isMask()) {
47046     unsigned TO = MaskVal.countr_one();
47047     if (TO >= 8 && isPowerOf2_32(TO))
47048       return SDValue();
47049   }
47050 
47051   APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
47052   unsigned OldMaskSize = MaskVal.getSignificantBits();
47053   unsigned NewMaskSize = NewMaskVal.getSignificantBits();
47054   if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
47055       (OldMaskSize > 32 && NewMaskSize <= 32)) {
47056     // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
47057     SDLoc DL(N);
47058     SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
47059     SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
47060     return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
47061   }
47062   return SDValue();
47063 }
47064 
47065 static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
47066                                          const X86Subtarget &Subtarget) {
47067   unsigned Opcode = N->getOpcode();
47068   assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");
47069 
47070   SDLoc DL(N);
47071   EVT VT = N->getValueType(0);
47072   SDValue N0 = N->getOperand(0);
47073   SDValue N1 = N->getOperand(1);
47074   EVT SrcVT = N0.getValueType();
47075 
47076   SDValue BC0 =
47077       N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
47078   SDValue BC1 =
47079       N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;
47080 
47081   // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
47082   // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
47083   // truncation trees that help us avoid lane crossing shuffles.
47084   // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
47085   // TODO: We don't handle vXf64 shuffles yet.
47086   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47087     if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
47088       SmallVector<SDValue> ShuffleOps;
47089       SmallVector<int> ShuffleMask, ScaledMask;
47090       SDValue Vec = peekThroughBitcasts(BCSrc);
47091       if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
47092         resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
47093         // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
47094         // shuffle to a v4X64 width - we can probably relax this in the future.
47095         if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
47096             ShuffleOps[0].getValueType().is256BitVector() &&
47097             scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
47098           SDValue Lo, Hi;
47099           MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47100           std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
47101           Lo = DAG.getBitcast(SrcVT, Lo);
47102           Hi = DAG.getBitcast(SrcVT, Hi);
47103           SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
47104           Res = DAG.getBitcast(ShufVT, Res);
47105           Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
47106           return DAG.getBitcast(VT, Res);
47107         }
47108       }
47109     }
47110   }
47111 
47112   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
47113   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47114     // If either/both ops are a shuffle that can scale to v2x64,
47115     // then see if we can perform this as a v4x32 post shuffle.
47116     SmallVector<SDValue> Ops0, Ops1;
47117     SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
47118     bool IsShuf0 =
47119         getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47120         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47121         all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47122     bool IsShuf1 =
47123         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47124         scaleShuffleElements(Mask1, 2, ScaledMask1) &&
47125         all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47126     if (IsShuf0 || IsShuf1) {
47127       if (!IsShuf0) {
47128         Ops0.assign({BC0});
47129         ScaledMask0.assign({0, 1});
47130       }
47131       if (!IsShuf1) {
47132         Ops1.assign({BC1});
47133         ScaledMask1.assign({0, 1});
47134       }
47135 
47136       SDValue LHS, RHS;
47137       int PostShuffle[4] = {-1, -1, -1, -1};
47138       auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
47139         if (M < 0)
47140           return true;
47141         Idx = M % 2;
47142         SDValue Src = Ops[M / 2];
47143         if (!LHS || LHS == Src) {
47144           LHS = Src;
47145           return true;
47146         }
47147         if (!RHS || RHS == Src) {
47148           Idx += 2;
47149           RHS = Src;
47150           return true;
47151         }
47152         return false;
47153       };
47154       if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
47155           FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
47156           FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
47157           FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
47158         LHS = DAG.getBitcast(SrcVT, LHS);
47159         RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
47160         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47161         SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
47162         Res = DAG.getBitcast(ShufVT, Res);
47163         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
47164         return DAG.getBitcast(VT, Res);
47165       }
47166     }
47167   }
47168 
47169   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
47170   if (VT.is256BitVector() && Subtarget.hasInt256()) {
47171     SmallVector<int> Mask0, Mask1;
47172     SmallVector<SDValue> Ops0, Ops1;
47173     SmallVector<int, 2> ScaledMask0, ScaledMask1;
47174     if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47175         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47176         !Ops0.empty() && !Ops1.empty() &&
47177         all_of(Ops0,
47178                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47179         all_of(Ops1,
47180                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47181         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47182         scaleShuffleElements(Mask1, 2, ScaledMask1)) {
47183       SDValue Op00 = peekThroughBitcasts(Ops0.front());
47184       SDValue Op10 = peekThroughBitcasts(Ops1.front());
47185       SDValue Op01 = peekThroughBitcasts(Ops0.back());
47186       SDValue Op11 = peekThroughBitcasts(Ops1.back());
47187       if ((Op00 == Op11) && (Op01 == Op10)) {
47188         std::swap(Op10, Op11);
47189         ShuffleVectorSDNode::commuteMask(ScaledMask1);
47190       }
47191       if ((Op00 == Op10) && (Op01 == Op11)) {
47192         const int Map[4] = {0, 2, 1, 3};
47193         SmallVector<int, 4> ShuffleMask(
47194             {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
47195              Map[ScaledMask1[1]]});
47196         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
47197         SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
47198                                   DAG.getBitcast(SrcVT, Op01));
47199         Res = DAG.getBitcast(ShufVT, Res);
47200         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
47201         return DAG.getBitcast(VT, Res);
47202       }
47203     }
47204   }
47205 
47206   return SDValue();
47207 }
47208 
47209 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
47210                                  TargetLowering::DAGCombinerInfo &DCI,
47211                                  const X86Subtarget &Subtarget) {
47212   unsigned Opcode = N->getOpcode();
47213   assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
47214          "Unexpected pack opcode");
47215 
47216   EVT VT = N->getValueType(0);
47217   SDValue N0 = N->getOperand(0);
47218   SDValue N1 = N->getOperand(1);
47219   unsigned NumDstElts = VT.getVectorNumElements();
47220   unsigned DstBitsPerElt = VT.getScalarSizeInBits();
47221   unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
47222   assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
47223          N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
47224          "Unexpected PACKSS/PACKUS input type");
47225 
47226   bool IsSigned = (X86ISD::PACKSS == Opcode);
47227 
47228   // Constant Folding.
47229   APInt UndefElts0, UndefElts1;
47230   SmallVector<APInt, 32> EltBits0, EltBits1;
47231   if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
47232       (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
47233       getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
47234       getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
47235     unsigned NumLanes = VT.getSizeInBits() / 128;
47236     unsigned NumSrcElts = NumDstElts / 2;
47237     unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
47238     unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
47239 
47240     APInt Undefs(NumDstElts, 0);
47241     SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
47242     for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
47243       for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
47244         unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
47245         auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
47246         auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
47247 
47248         if (UndefElts[SrcIdx]) {
47249           Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
47250           continue;
47251         }
47252 
47253         APInt &Val = EltBits[SrcIdx];
47254         if (IsSigned) {
47255           // PACKSS: Truncate signed value with signed saturation.
47256           // Source values less than dst minint are saturated to minint.
47257           // Source values greater than dst maxint are saturated to maxint.
47258           if (Val.isSignedIntN(DstBitsPerElt))
47259             Val = Val.trunc(DstBitsPerElt);
47260           else if (Val.isNegative())
47261             Val = APInt::getSignedMinValue(DstBitsPerElt);
47262           else
47263             Val = APInt::getSignedMaxValue(DstBitsPerElt);
47264         } else {
47265           // PACKUS: Truncate signed value with unsigned saturation.
47266           // Source values less than zero are saturated to zero.
47267           // Source values greater than dst maxuint are saturated to maxuint.
47268           if (Val.isIntN(DstBitsPerElt))
47269             Val = Val.trunc(DstBitsPerElt);
47270           else if (Val.isNegative())
47271             Val = APInt::getZero(DstBitsPerElt);
47272           else
47273             Val = APInt::getAllOnes(DstBitsPerElt);
47274         }
47275         Bits[Lane * NumDstEltsPerLane + Elt] = Val;
47276       }
47277     }
47278 
47279     return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
47280   }
47281 
47282   // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
47283   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47284     return V;
47285 
47286   // Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
47287   // Currently limit this to allsignbits cases only.
47288   if (IsSigned &&
47289       (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
47290       (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
47291     SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
47292     SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
47293     if (Not0 && Not1) {
47294       SDLoc DL(N);
47295       MVT SrcVT = N0.getSimpleValueType();
47296       SDValue Pack =
47297           DAG.getNode(X86ISD::PACKSS, DL, VT, DAG.getBitcast(SrcVT, Not0),
47298                       DAG.getBitcast(SrcVT, Not1));
47299       return DAG.getNOT(DL, Pack, VT);
47300     }
47301   }
47302 
47303   // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
47304   // truncate to create a larger truncate.
47305   if (Subtarget.hasAVX512() &&
47306       N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
47307       N0.getOperand(0).getValueType() == MVT::v8i32) {
47308     if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
47309         (!IsSigned &&
47310          DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
47311       if (Subtarget.hasVLX())
47312         return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
47313 
47314       // Widen input to v16i32 so we can truncate that.
47315       SDLoc dl(N);
47316       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
47317                                    N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
47318       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
47319     }
47320   }
47321 
47322   // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
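        // e.g. a v16i8 PACKUS(zext(v8i8 X), zext(v8i8 Y)) is just CONCAT(X, Y): the
        // zero-extended values can never saturate when packed back down to i8.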
47323   if (VT.is128BitVector()) {
47324     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
47325     SDValue Src0, Src1;
47326     if (N0.getOpcode() == ExtOpc &&
47327         N0.getOperand(0).getValueType().is64BitVector() &&
47328         N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47329       Src0 = N0.getOperand(0);
47330     }
47331     if (N1.getOpcode() == ExtOpc &&
47332         N1.getOperand(0).getValueType().is64BitVector() &&
47333         N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47334       Src1 = N1.getOperand(0);
47335     }
47336     if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
47337       assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
47338       Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
47339       Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
47340       return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
47341     }
47342 
47343     // Try again with pack(*_extend_vector_inreg, undef).
47344     unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
47345                                     : ISD::ZERO_EXTEND_VECTOR_INREG;
47346     if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
47347         N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
47348       return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
47349                                     DAG);
47350   }
47351 
47352   // Attempt to combine as shuffle.
47353   SDValue Op(N, 0);
47354   if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47355     return Res;
47356 
47357   return SDValue();
47358 }
47359 
47360 static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
47361                                     TargetLowering::DAGCombinerInfo &DCI,
47362                                     const X86Subtarget &Subtarget) {
47363   assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
47364           X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
47365          "Unexpected horizontal add/sub opcode");
47366 
47367   if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
47368     MVT VT = N->getSimpleValueType(0);
47369     SDValue LHS = N->getOperand(0);
47370     SDValue RHS = N->getOperand(1);
47371 
47372     // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y))).
47373     if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
47374         LHS.getOpcode() == RHS.getOpcode() &&
47375         LHS.getValueType() == RHS.getValueType() &&
47376         N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
47377       SDValue LHS0 = LHS.getOperand(0);
47378       SDValue LHS1 = LHS.getOperand(1);
47379       SDValue RHS0 = RHS.getOperand(0);
47380       SDValue RHS1 = RHS.getOperand(1);
47381       if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
47382           (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
47383         SDLoc DL(N);
47384         SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
47385                                   LHS0.isUndef() ? LHS1 : LHS0,
47386                                   RHS0.isUndef() ? RHS1 : RHS0);
47387         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
47388         Res = DAG.getBitcast(ShufVT, Res);
47389         SDValue NewLHS =
47390             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47391                         getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
47392         SDValue NewRHS =
47393             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47394                         getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
47395         return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
47396                            DAG.getBitcast(VT, NewRHS));
47397       }
47398     }
47399   }
47400 
47401   // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
47402   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47403     return V;
47404 
47405   return SDValue();
47406 }
47407 
47408 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
47409                                      TargetLowering::DAGCombinerInfo &DCI,
47410                                      const X86Subtarget &Subtarget) {
47411   assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
47412           X86ISD::VSRL == N->getOpcode()) &&
47413          "Unexpected shift opcode");
47414   EVT VT = N->getValueType(0);
47415   SDValue N0 = N->getOperand(0);
47416   SDValue N1 = N->getOperand(1);
47417 
47418   // Shift zero -> zero.
47419   if (ISD::isBuildVectorAllZeros(N0.getNode()))
47420     return DAG.getConstant(0, SDLoc(N), VT);
47421 
47422   // Detect constant shift amounts.
47423   APInt UndefElts;
47424   SmallVector<APInt, 32> EltBits;
47425   if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
47426     unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
47427     return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
47428                                       EltBits[0].getZExtValue(), DAG);
47429   }
47430 
47431   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47432   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
47433   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
47434     return SDValue(N, 0);
47435 
47436   return SDValue();
47437 }
47438 
47439 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
47440                                      TargetLowering::DAGCombinerInfo &DCI,
47441                                      const X86Subtarget &Subtarget) {
47442   unsigned Opcode = N->getOpcode();
47443   assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
47444           X86ISD::VSRLI == Opcode) &&
47445          "Unexpected shift opcode");
47446   bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
47447   EVT VT = N->getValueType(0);
47448   SDValue N0 = N->getOperand(0);
47449   SDValue N1 = N->getOperand(1);
47450   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47451   assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
47452          "Unexpected value type");
47453   assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
47454 
47455   // (shift undef, X) -> 0
47456   if (N0.isUndef())
47457     return DAG.getConstant(0, SDLoc(N), VT);
47458 
47459   // Out of range logical bit shifts are guaranteed to be zero.
47460   // Out of range arithmetic bit shifts splat the sign bit.
47461   unsigned ShiftVal = N->getConstantOperandVal(1);
47462   if (ShiftVal >= NumBitsPerElt) {
47463     if (LogicalShift)
47464       return DAG.getConstant(0, SDLoc(N), VT);
47465     ShiftVal = NumBitsPerElt - 1;
47466   }
47467 
47468   // (shift X, 0) -> X
47469   if (!ShiftVal)
47470     return N0;
47471 
47472   // (shift 0, C) -> 0
47473   if (ISD::isBuildVectorAllZeros(N0.getNode()))
47474     // N0 is all zeros or undef. We guarantee that the bits shifted into the
47475     // result are all zeros, not undef.
47476     return DAG.getConstant(0, SDLoc(N), VT);
47477 
47478   // (VSRAI -1, C) -> -1
47479   if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
47480     // N0 is all ones or undef. We guarantee that the bits shifted into the
47481     // result are all ones, not undef.
47482     return DAG.getConstant(-1, SDLoc(N), VT);
47483 
47484   auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
47485     unsigned NewShiftVal = Amt0 + Amt1;
47486     if (NewShiftVal >= NumBitsPerElt) {
47487       // Out of range logical bit shifts are guaranteed to be zero.
47488       // Out of range arithmetic bit shifts splat the sign bit.
47489       if (LogicalShift)
47490         return DAG.getConstant(0, SDLoc(N), VT);
47491       NewShiftVal = NumBitsPerElt - 1;
47492     }
47493     return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
47494                        DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
47495   };
47496 
47497   // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
47498   if (Opcode == N0.getOpcode())
47499     return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));
47500 
47501   // (shl (add X, X), C) -> (shl X, (C + 1))
47502   if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
47503       N0.getOperand(0) == N0.getOperand(1))
47504     return MergeShifts(N0.getOperand(0), ShiftVal, 1);
47505 
47506   // We can decode 'whole byte' logical bit shifts as shuffles.
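        // (e.g. a v2i64 VSRLI by 48 just moves each element's top two bytes down and
        // zero-fills the rest, which the shuffle combiner can merge with other
        // shuffles.)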
47507   if (LogicalShift && (ShiftVal % 8) == 0) {
47508     SDValue Op(N, 0);
47509     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47510       return Res;
47511   }
47512 
47513   // Attempt to detect an expanded vXi64 SIGN_EXTEND_INREG vXi1 pattern, and
47514   // convert to a splatted v2Xi32 SIGN_EXTEND_INREG pattern:
47515   // psrad(pshufd(psllq(X,63),1,1,3,3),31) ->
47516   // pshufd(psrad(pslld(X,31),31),0,0,2,2).
47517   if (Opcode == X86ISD::VSRAI && NumBitsPerElt == 32 && ShiftVal == 31 &&
47518       N0.getOpcode() == X86ISD::PSHUFD &&
47519       N0.getConstantOperandVal(1) == getV4X86ShuffleImm({1, 1, 3, 3}) &&
47520       N0->hasOneUse()) {
47521     SDValue BC = peekThroughOneUseBitcasts(N0.getOperand(0));
47522     if (BC.getOpcode() == X86ISD::VSHLI &&
47523         BC.getScalarValueSizeInBits() == 64 &&
47524         BC.getConstantOperandVal(1) == 63) {
47525       SDLoc DL(N);
47526       SDValue Src = BC.getOperand(0);
47527       Src = DAG.getBitcast(VT, Src);
47528       Src = DAG.getNode(X86ISD::PSHUFD, DL, VT, Src,
47529                         getV4X86ShuffleImm8ForMask({0, 0, 2, 2}, DL, DAG));
47530       Src = DAG.getNode(X86ISD::VSHLI, DL, VT, Src, N1);
47531       Src = DAG.getNode(X86ISD::VSRAI, DL, VT, Src, N1);
47532       return Src;
47533     }
47534   }
47535 
47536   auto TryConstantFold = [&](SDValue V) {
47537     APInt UndefElts;
47538     SmallVector<APInt, 32> EltBits;
47539     if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
47540       return SDValue();
47541     assert(EltBits.size() == VT.getVectorNumElements() &&
47542            "Unexpected shift value type");
47543     // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
47544     // created an undef input due to no input bits being demanded, but the user
47545     // still expects 0 in the other bits.
47546     for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
47547       APInt &Elt = EltBits[i];
47548       if (UndefElts[i])
47549         Elt = 0;
47550       else if (X86ISD::VSHLI == Opcode)
47551         Elt <<= ShiftVal;
47552       else if (X86ISD::VSRAI == Opcode)
47553         Elt.ashrInPlace(ShiftVal);
47554       else
47555         Elt.lshrInPlace(ShiftVal);
47556     }
47557     // Reset undef elements since they were zeroed above.
47558     UndefElts = 0;
47559     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
47560   };
47561 
47562   // Constant Folding.
47563   if (N->isOnlyUserOf(N0.getNode())) {
47564     if (SDValue C = TryConstantFold(N0))
47565       return C;
47566 
47567     // Fold (shift (logic X, C2), C1) -> (logic (shift X, C1), (shift C2, C1))
47568     // Don't break NOT patterns.
47569     SDValue BC = peekThroughOneUseBitcasts(N0);
47570     if (ISD::isBitwiseLogicOp(BC.getOpcode()) &&
47571         BC->isOnlyUserOf(BC.getOperand(1).getNode()) &&
47572         !ISD::isBuildVectorAllOnes(BC.getOperand(1).getNode())) {
47573       if (SDValue RHS = TryConstantFold(BC.getOperand(1))) {
47574         SDLoc DL(N);
47575         SDValue LHS = DAG.getNode(Opcode, DL, VT,
47576                                   DAG.getBitcast(VT, BC.getOperand(0)), N1);
47577         return DAG.getNode(BC.getOpcode(), DL, VT, LHS, RHS);
47578       }
47579     }
47580   }
47581 
47582   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47583   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
47584                                DCI))
47585     return SDValue(N, 0);
47586 
47587   return SDValue();
47588 }
47589 
47590 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
47591                                    TargetLowering::DAGCombinerInfo &DCI,
47592                                    const X86Subtarget &Subtarget) {
47593   EVT VT = N->getValueType(0);
47594   unsigned Opcode = N->getOpcode();
47595   assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
47596           (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
47597           Opcode == ISD::INSERT_VECTOR_ELT) &&
47598          "Unexpected vector insertion");
47599 
47600   SDValue Vec = N->getOperand(0);
47601   SDValue Scl = N->getOperand(1);
47602   SDValue Idx = N->getOperand(2);
47603 
47604   // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
47605   if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
47606     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);
47607 
47608   if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
47609     unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47610     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47611     if (TLI.SimplifyDemandedBits(SDValue(N, 0),
47612                                  APInt::getAllOnes(NumBitsPerElt), DCI))
47613       return SDValue(N, 0);
47614   }
47615 
47616   // Attempt to combine insertion patterns to a shuffle.
47617   if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
47618     SDValue Op(N, 0);
47619     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47620       return Res;
47621   }
47622 
47623   return SDValue();
47624 }
47625 
47626 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
47627 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
47628 /// OR -> CMPNEQSS.
47629 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
47630                                    TargetLowering::DAGCombinerInfo &DCI,
47631                                    const X86Subtarget &Subtarget) {
47632   unsigned opcode;
47633 
47634   // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
47635   // we're requiring SSE2 for both.
47636   if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
47637     SDValue N0 = N->getOperand(0);
47638     SDValue N1 = N->getOperand(1);
47639     SDValue CMP0 = N0.getOperand(1);
47640     SDValue CMP1 = N1.getOperand(1);
47641     SDLoc DL(N);
47642 
47643     // The SETCCs should both refer to the same CMP.
47644     if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
47645       return SDValue();
47646 
47647     SDValue CMP00 = CMP0->getOperand(0);
47648     SDValue CMP01 = CMP0->getOperand(1);
47649     EVT     VT    = CMP00.getValueType();
47650 
47651     if (VT == MVT::f32 || VT == MVT::f64 ||
47652         (VT == MVT::f16 && Subtarget.hasFP16())) {
47653       bool ExpectingFlags = false;
47654       // Check for any users that want flags:
47655       for (const SDNode *U : N->uses()) {
47656         if (ExpectingFlags)
47657           break;
47658 
47659         switch (U->getOpcode()) {
47660         default:
47661         case ISD::BR_CC:
47662         case ISD::BRCOND:
47663         case ISD::SELECT:
47664           ExpectingFlags = true;
47665           break;
47666         case ISD::CopyToReg:
47667         case ISD::SIGN_EXTEND:
47668         case ISD::ZERO_EXTEND:
47669         case ISD::ANY_EXTEND:
47670           break;
47671         }
47672       }
47673 
47674       if (!ExpectingFlags) {
47675         enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
47676         enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
47677 
47678         if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
47679           X86::CondCode tmp = cc0;
47680           cc0 = cc1;
47681           cc1 = tmp;
47682         }
47683 
47684         if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
47685             (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
47686           // FIXME: need symbolic constants for these magic numbers.
47687           // See X86ATTInstPrinter.cpp:printSSECC().
47688           unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
47689           if (Subtarget.hasAVX512()) {
47690             SDValue FSetCC =
47691                 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
47692                             DAG.getTargetConstant(x86cc, DL, MVT::i8));
47693             // Need to fill with zeros to ensure the bitcast will produce zeroes
47694             // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
47695             SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
47696                                       DAG.getConstant(0, DL, MVT::v16i1),
47697                                       FSetCC, DAG.getIntPtrConstant(0, DL));
47698             return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
47699                                       N->getSimpleValueType(0));
47700           }
47701           SDValue OnesOrZeroesF =
47702               DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
47703                           CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
47704 
47705           bool is64BitFP = (CMP00.getValueType() == MVT::f64);
47706           MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
47707 
47708           if (is64BitFP && !Subtarget.is64Bit()) {
47709             // On a 32-bit target, we cannot bitcast the 64-bit float to a
47710             // 64-bit integer, since that's not a legal type. Since
47711             // OnesOrZeroesF is all ones or all zeroes, we don't need all the
47712             // bits, but can do this little dance to extract the lowest 32 bits
47713             // and work with those going forward.
47714             SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
47715                                            OnesOrZeroesF);
47716             SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
47717             OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
47718                                         Vector32, DAG.getIntPtrConstant(0, DL));
47719             IntVT = MVT::i32;
47720           }
47721 
47722           SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
47723           SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
47724                                       DAG.getConstant(1, DL, IntVT));
47725           SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
47726                                               ANDed);
47727           return OneBitOfTruth;
47728         }
47729       }
47730     }
47731   }
47732   return SDValue();
47733 }
47734 
47735 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
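/// For example (illustrative v4i32 IR on an SSE2 target):
///   %not = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
///   %and = and <4 x i32> %not, %y
/// can be selected as a single PANDN, since ANDNP(X, Y) computes (~X) & Y.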
47736 static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
47737   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47738 
47739   MVT VT = N->getSimpleValueType(0);
47740   if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
47741     return SDValue();
47742 
47743   SDValue X, Y;
47744   SDValue N0 = N->getOperand(0);
47745   SDValue N1 = N->getOperand(1);
47746 
47747   if (SDValue Not = IsNOT(N0, DAG)) {
47748     X = Not;
47749     Y = N1;
47750   } else if (SDValue Not = IsNOT(N1, DAG)) {
47751     X = Not;
47752     Y = N0;
47753   } else
47754     return SDValue();
47755 
47756   X = DAG.getBitcast(VT, X);
47757   Y = DAG.getBitcast(VT, Y);
47758   return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
47759 }
47760 
47761 /// Try to fold:
47762 ///   and (vector_shuffle<Z,...,Z>
47763 ///            (insert_vector_elt undef, (xor X, -1), Z), undef), Y
47764 ///   ->
47765 ///   andnp (vector_shuffle<Z,...,Z>
47766 ///              (insert_vector_elt undef, X, Z), undef), Y
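/// i.e. when a NOT-ed scalar is broadcast (insert_vector_elt at the splat
/// index followed by a splat shuffle) and then AND-ed with Y, the xor can be
/// dropped from the scalar and the AND replaced with ANDNP, because
/// ANDNP(splat(X), Y) == splat(~X) & Y (a sketch; the exact operand checks
/// are performed below).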
47767 static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
47768                                     const X86Subtarget &Subtarget) {
47769   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47770 
47771   EVT VT = N->getValueType(0);
47772   // Do not split 256- and 512-bit vectors with only SSE2, as splitting them
47773   // would overwrite the original value and require extra moves.
47774   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
47775         ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
47776     return SDValue();
47777 
47778   auto GetNot = [&DAG](SDValue V) {
47779     auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
47780     // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
47781     // end-users are ISD::AND, including cases such as
47782     // (and(extract_vector_element(SVN), Y)).
47783     if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
47784         !SVN->getOperand(1).isUndef()) {
47785       return SDValue();
47786     }
47787     SDValue IVEN = SVN->getOperand(0);
47788     if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
47789         !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
47790       return SDValue();
47791     if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
47792         IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
47793       return SDValue();
47794     SDValue Src = IVEN.getOperand(1);
47795     if (SDValue Not = IsNOT(Src, DAG)) {
47796       SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
47797       SDValue NotIVEN =
47798           DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
47799                       IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
47800       return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
47801                                   SVN->getOperand(1), SVN->getMask());
47802     }
47803     return SDValue();
47804   };
47805 
47806   SDValue X, Y;
47807   SDValue N0 = N->getOperand(0);
47808   SDValue N1 = N->getOperand(1);
47809 
47810   if (SDValue Not = GetNot(N0)) {
47811     X = Not;
47812     Y = N1;
47813   } else if (SDValue Not = GetNot(N1)) {
47814     X = Not;
47815     Y = N0;
47816   } else
47817     return SDValue();
47818 
47819   X = DAG.getBitcast(VT, X);
47820   Y = DAG.getBitcast(VT, Y);
47821   SDLoc DL(N);
47822   // We do not split for SSE at all, but we need to split vectors for AVX1 and
47823   // AVX2.
47824   if (!Subtarget.useAVX512Regs() && VT.is512BitVector()) {
47825     SDValue LoX, HiX;
47826     std::tie(LoX, HiX) = splitVector(X, DAG, DL);
47827     SDValue LoY, HiY;
47828     std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
47829     EVT SplitVT = LoX.getValueType();
47830     SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
47831     SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
47832     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
47833   }
47834   return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
47835 }
47836 
47837 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
47838 // logical operations, like in the example below.
47839 //   or (and (truncate x, truncate y)),
47840 //      (xor (truncate z, build_vector (constants)))
47841 // Given a target type \p VT, we generate
47842 //   or (and x, y), (xor z, zext(build_vector (constants)))
47843 // where x, y and z are of type \p VT. We can do so if each operand is either
47844 // a truncate from VT, a vector of constants (for the second operand), or can
47845 // itself be recursively promoted.
47846 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
47847                                      unsigned Depth) {
47848   // Limit recursion to avoid excessive compile times.
47849   if (Depth >= SelectionDAG::MaxRecursionDepth)
47850     return SDValue();
47851 
47852   if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
47853       N->getOpcode() != ISD::OR)
47854     return SDValue();
47855 
47856   SDValue N0 = N->getOperand(0);
47857   SDValue N1 = N->getOperand(1);
47858   SDLoc DL(N);
47859 
47860   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47861   if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
47862     return SDValue();
47863 
47864   if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
47865     N0 = NN0;
47866   else {
47867     // The left-hand side has to be a trunc.
47868     if (N0.getOpcode() != ISD::TRUNCATE)
47869       return SDValue();
47870 
47871     // The type of the truncated inputs.
47872     if (N0.getOperand(0).getValueType() != VT)
47873       return SDValue();
47874 
47875     N0 = N0.getOperand(0);
47876   }
47877 
47878   if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
47879     N1 = NN1;
47880   else {
47881     // The right side has to be a 'trunc' or a constant vector.
47882     bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
47883                     N1.getOperand(0).getValueType() == VT;
47884     if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
47885       return SDValue();
47886 
47887     if (RHSTrunc)
47888       N1 = N1.getOperand(0);
47889     else
47890       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
47891   }
47892 
47893   return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
47894 }
47895 
47896 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM-sized
47897 // register. In most cases we actually compare or select YMM-sized registers
47898 // and mixing the two types creates horrible code. This method optimizes
47899 // some of the transition sequences.
47900 // Even with AVX-512 this is still useful for removing casts around logical
47901 // operations on vXi1 mask types.
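// For example (illustrative types): with VT = v8i32 and a narrow v8i16
// operation,
//   sext (and (trunc A), (trunc B))
// becomes
//   sign_extend_inreg (and A, B), v8i16
// so the logic is performed directly on the wide registers.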
47902 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
47903                                      const X86Subtarget &Subtarget) {
47904   EVT VT = N->getValueType(0);
47905   assert(VT.isVector() && "Expected vector type");
47906 
47907   SDLoc DL(N);
47908   assert((N->getOpcode() == ISD::ANY_EXTEND ||
47909           N->getOpcode() == ISD::ZERO_EXTEND ||
47910           N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
47911 
47912   SDValue Narrow = N->getOperand(0);
47913   EVT NarrowVT = Narrow.getValueType();
47914 
47915   // Generate the wide operation.
47916   SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
47917   if (!Op)
47918     return SDValue();
47919   switch (N->getOpcode()) {
47920   default: llvm_unreachable("Unexpected opcode");
47921   case ISD::ANY_EXTEND:
47922     return Op;
47923   case ISD::ZERO_EXTEND:
47924     return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
47925   case ISD::SIGN_EXTEND:
47926     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
47927                        Op, DAG.getValueType(NarrowVT));
47928   }
47929 }
47930 
47931 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
47932   unsigned FPOpcode;
47933   switch (Opcode) {
47934   default: llvm_unreachable("Unexpected input node for FP logic conversion");
47935   case ISD::AND: FPOpcode = X86ISD::FAND; break;
47936   case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
47937   case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
47938   }
47939   return FPOpcode;
47940 }
47941 
47942 /// If both input operands of a logic op are being cast from floating-point
47943 /// types or FP compares, try to convert this into a floating-point logic node
47944 /// to avoid unnecessary moves from SSE to integer registers.
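/// For example (scalar f32 sketch):
///   and (bitcast f32 X to i32), (bitcast f32 Y to i32)
/// becomes
///   bitcast (FAND X, Y) to i32
/// keeping the values in SSE registers instead of bouncing through GPRs.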
47945 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
47946                                         TargetLowering::DAGCombinerInfo &DCI,
47947                                         const X86Subtarget &Subtarget) {
47948   EVT VT = N->getValueType(0);
47949   SDValue N0 = N->getOperand(0);
47950   SDValue N1 = N->getOperand(1);
47951   SDLoc DL(N);
47952 
47953   if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
47954         (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
47955     return SDValue();
47956 
47957   SDValue N00 = N0.getOperand(0);
47958   SDValue N10 = N1.getOperand(0);
47959   EVT N00Type = N00.getValueType();
47960   EVT N10Type = N10.getValueType();
47961 
47962   // Ensure that both types are the same and are legal scalar fp types.
47963   if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
47964                               (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
47965                               (Subtarget.hasFP16() && N00Type == MVT::f16)))
47966     return SDValue();
47967 
47968   if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
47969     unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
47970     SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
47971     return DAG.getBitcast(VT, FPLogic);
47972   }
47973 
47974   if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
47975       !N1.hasOneUse())
47976     return SDValue();
47977 
47978   ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
47979   ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();
47980 
47981   // The vector ISA for FP predicates is incomplete before AVX, so converting
47982   // COMIS* to CMPS* may not be a win before AVX.
47983   if (!Subtarget.hasAVX() &&
47984       !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
47985     return SDValue();
47986 
47987   // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
47988   // and vector logic:
47989   // logic (setcc N00, N01), (setcc N10, N11) -->
47990   // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
47991   unsigned NumElts = 128 / N00Type.getSizeInBits();
47992   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
47993   EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
47994   SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
47995   SDValue N01 = N0.getOperand(1);
47996   SDValue N11 = N1.getOperand(1);
47997   SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
47998   SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
47999   SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
48000   SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
48001   SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
48002   SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
48003   SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
48004   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
48005 }
48006 
48007 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
48008 // to reduce XMM->GPR traffic.
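// For example (sketch):
//   or (movmsk X:v4i32), (movmsk Y:v4i32) --> movmsk (or X, Y)
// For floating-point vectors the inner bit op is emitted as the FP variant
// (FAND/FOR/FXOR) so that the vector types still match.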
48009 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
48010   unsigned Opc = N->getOpcode();
48011   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48012          "Unexpected bit opcode");
48013 
48014   SDValue N0 = N->getOperand(0);
48015   SDValue N1 = N->getOperand(1);
48016 
48017   // Both operands must be single use MOVMSK.
48018   if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
48019       N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
48020     return SDValue();
48021 
48022   SDValue Vec0 = N0.getOperand(0);
48023   SDValue Vec1 = N1.getOperand(0);
48024   EVT VecVT0 = Vec0.getValueType();
48025   EVT VecVT1 = Vec1.getValueType();
48026 
48027   // Both MOVMSK operands must be from vectors of the same size and same element
48028   // Both MOVMSK operands must be from vectors of the same size and same element
48029   // size, but an fp/int type difference is OK.
48030       VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
48031     return SDValue();
48032 
48033   SDLoc DL(N);
48034   unsigned VecOpc =
48035       VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
48036   SDValue Result =
48037       DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
48038   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48039 }
48040 
48041 // Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
48042 // NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
48043 // handles in InstCombine.
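// For example (sketch): xor (vsrli X, 5), (vsrli Y, 5) --> vsrli (xor X, Y), 5
// provided both shifts use the same immediate amount.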
48044 static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
48045   unsigned Opc = N->getOpcode();
48046   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48047          "Unexpected bit opcode");
48048 
48049   SDValue N0 = N->getOperand(0);
48050   SDValue N1 = N->getOperand(1);
48051   EVT VT = N->getValueType(0);
48052 
48053   // Both operands must be single use.
48054   if (!N0.hasOneUse() || !N1.hasOneUse())
48055     return SDValue();
48056 
48057   // Search for matching shifts.
48058   SDValue BC0 = peekThroughOneUseBitcasts(N0);
48059   SDValue BC1 = peekThroughOneUseBitcasts(N1);
48060 
48061   unsigned BCOpc = BC0.getOpcode();
48062   EVT BCVT = BC0.getValueType();
48063   if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
48064     return SDValue();
48065 
48066   switch (BCOpc) {
48067   case X86ISD::VSHLI:
48068   case X86ISD::VSRLI:
48069   case X86ISD::VSRAI: {
48070     if (BC0.getOperand(1) != BC1.getOperand(1))
48071       return SDValue();
48072 
48073     SDLoc DL(N);
48074     SDValue BitOp =
48075         DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
48076     SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
48077     return DAG.getBitcast(VT, Shift);
48078   }
48079   }
48080 
48081   return SDValue();
48082 }
48083 
48084 // Attempt to fold:
48085 // BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W)).
48086 // TODO: Add PACKUS handling.
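// For example (sketch, with all inputs known to be all-sign-bits):
//   or (packss A, B), (packss C, D) --> packss (or A, C), (or B, D)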
48087 static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {
48088   unsigned Opc = N->getOpcode();
48089   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48090          "Unexpected bit opcode");
48091 
48092   SDValue N0 = N->getOperand(0);
48093   SDValue N1 = N->getOperand(1);
48094   EVT VT = N->getValueType(0);
48095 
48096   // Both operands must be single use.
48097   if (!N0.hasOneUse() || !N1.hasOneUse())
48098     return SDValue();
48099 
48100   // Search for matching packs.
48101   N0 = peekThroughOneUseBitcasts(N0);
48102   N1 = peekThroughOneUseBitcasts(N1);
48103 
48104   if (N0.getOpcode() != X86ISD::PACKSS || N1.getOpcode() != X86ISD::PACKSS)
48105     return SDValue();
48106 
48107   MVT DstVT = N0.getSimpleValueType();
48108   if (DstVT != N1.getSimpleValueType())
48109     return SDValue();
48110 
48111   MVT SrcVT = N0.getOperand(0).getSimpleValueType();
48112   unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
48113 
48114   // Limit to allsignbits packing.
48115   if (DAG.ComputeNumSignBits(N0.getOperand(0)) != NumSrcBits ||
48116       DAG.ComputeNumSignBits(N0.getOperand(1)) != NumSrcBits ||
48117       DAG.ComputeNumSignBits(N1.getOperand(0)) != NumSrcBits ||
48118       DAG.ComputeNumSignBits(N1.getOperand(1)) != NumSrcBits)
48119     return SDValue();
48120 
48121   SDLoc DL(N);
48122   SDValue LHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(0), N1.getOperand(0));
48123   SDValue RHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(1), N1.getOperand(1));
48124   return DAG.getBitcast(VT, DAG.getNode(X86ISD::PACKSS, DL, DstVT, LHS, RHS));
48125 }
48126 
48127 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
48128 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
48129 /// with a shift-right to eliminate loading the vector constant mask value.
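/// For example (sketch): if X:v4i32 is known to be all-sign-bits per element
/// (e.g. a compare result), then
///   and X, <1,1,1,1>  -->  vsrli X, 31
/// since logically shifting the sign bit down reproduces the 0/1 value
/// without a constant-pool load.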
48130 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
48131                                      const X86Subtarget &Subtarget) {
48132   SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
48133   SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
48134   EVT VT = Op0.getValueType();
48135   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
48136     return SDValue();
48137 
48138   // Try to convert an "is positive" signbit masking operation into arithmetic
48139   // shift and "andn". This saves a materialization of a -1 vector constant.
48140   // The "is negative" variant should be handled more generally because it only
48141   // requires "and" rather than "andn":
48142   // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
48143   //
48144   // This is limited to the original type to avoid producing even more bitcasts.
48145   // If the bitcasts can't be eliminated, then it is unlikely that this fold
48146   // will be profitable.
48147   if (N->getValueType(0) == VT &&
48148       supportedVectorShiftWithImm(VT, Subtarget, ISD::SRA)) {
48149     SDValue X, Y;
48150     if (Op1.getOpcode() == X86ISD::PCMPGT &&
48151         isAllOnesOrAllOnesSplat(Op1.getOperand(1)) && Op1.hasOneUse()) {
48152       X = Op1.getOperand(0);
48153       Y = Op0;
48154     } else if (Op0.getOpcode() == X86ISD::PCMPGT &&
48155                isAllOnesOrAllOnesSplat(Op0.getOperand(1)) && Op0.hasOneUse()) {
48156       X = Op0.getOperand(0);
48157       Y = Op1;
48158     }
48159     if (X && Y) {
48160       SDLoc DL(N);
48161       SDValue Sra =
48162           getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
48163                                      VT.getScalarSizeInBits() - 1, DAG);
48164       return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
48165     }
48166   }
48167 
48168   APInt SplatVal;
48169   if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
48170     return SDValue();
48171 
48172   // Don't prevent creation of ANDN.
48173   if (isBitwiseNot(Op0))
48174     return SDValue();
48175 
48176   if (!supportedVectorShiftWithImm(VT, Subtarget, ISD::SRL))
48177     return SDValue();
48178 
48179   unsigned EltBitWidth = VT.getScalarSizeInBits();
48180   if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
48181     return SDValue();
48182 
48183   SDLoc DL(N);
48184   unsigned ShiftVal = SplatVal.countr_one();
48185   SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
48186   SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
48187   return DAG.getBitcast(N->getValueType(0), Shift);
48188 }
48189 
48190 // Get the index node from the lowered DAG of a GEP IR instruction with one
48191 // indexing dimension.
48192 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
48193   if (Ld->isIndexed())
48194     return SDValue();
48195 
48196   SDValue Base = Ld->getBasePtr();
48197 
48198   if (Base.getOpcode() != ISD::ADD)
48199     return SDValue();
48200 
48201   SDValue ShiftedIndex = Base.getOperand(0);
48202 
48203   if (ShiftedIndex.getOpcode() != ISD::SHL)
48204     return SDValue();
48205 
48206   return ShiftedIndex.getOperand(0);
48207 
48208 }
48209 
48210 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
48211   if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
48212     switch (VT.getSizeInBits()) {
48213     default: return false;
48214     case 64: return Subtarget.is64Bit();
48215     case 32: return true;
48216     }
48217   }
48218   return false;
48219 }
48220 
48221 // This function recognizes cases where the X86 bzhi instruction can replace an
48222 // 'and-load' sequence.
48223 // When an integer value is loaded from an array of constants defined
48224 // as follows:
48225 //
48226 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
48227 //
48228 // and the loaded value is then bitwise-anded with another input, the sequence
48229 // is equivalent to performing bzhi (zero high bits) on that input, using the
48230 // same index as the load.
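// For example (sketch, in C): with the array above, where
// array[i] == (1 << i) - 1 is a mask of the low i bits,
//   array[i] & x
// keeps only the low i bits of x, which is exactly bzhi(x, i). The combine
// below rebuilds the equivalent (and x, (srl -1, (sub 32, idx))) DAG, which
// instruction selection then matches to a single bzhi.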
48231 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
48232                                     const X86Subtarget &Subtarget) {
48233   MVT VT = Node->getSimpleValueType(0);
48234   SDLoc dl(Node);
48235 
48236   // Check if subtarget has BZHI instruction for the node's type
48237   if (!hasBZHI(Subtarget, VT))
48238     return SDValue();
48239 
48240   // Try matching the pattern for both operands.
48241   for (unsigned i = 0; i < 2; i++) {
48242     SDValue N = Node->getOperand(i);
48243     LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
48244 
48245     // Bail out if the operand is not a load instruction.
48246     if (!Ld)
48247       return SDValue();
48248 
48249     const Value *MemOp = Ld->getMemOperand()->getValue();
48250 
48251     if (!MemOp)
48252       return SDValue();
48253 
48254     if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
48255       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
48256         if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
48257 
48258           Constant *Init = GV->getInitializer();
48259           Type *Ty = Init->getType();
48260           if (!isa<ConstantDataArray>(Init) ||
48261               !Ty->getArrayElementType()->isIntegerTy() ||
48262               Ty->getArrayElementType()->getScalarSizeInBits() !=
48263                   VT.getSizeInBits() ||
48264               Ty->getArrayNumElements() >
48265                   Ty->getArrayElementType()->getScalarSizeInBits())
48266             continue;
48267 
48268           // Check if the array's constant elements are suitable to our case.
48269           uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
48270           bool ConstantsMatch = true;
48271           for (uint64_t j = 0; j < ArrayElementCount; j++) {
48272             auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
48273             if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
48274               ConstantsMatch = false;
48275               break;
48276             }
48277           }
48278           if (!ConstantsMatch)
48279             continue;
48280 
48281           // Do the transformation (for a 32-bit type):
48282           //   (and (load arr[idx]), inp)
48283           // becomes (and inp, (srl 0xFFFFFFFF, (sub 32, idx))),
48284           //   which will then be selected as a single bzhi instruction.
48285           SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
48286           SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
48287 
48288           // Get the Node which indexes into the array.
48289           SDValue Index = getIndexFromUnindexedLoad(Ld);
48290           if (!Index)
48291             return SDValue();
48292           Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
48293 
48294           SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
48295           Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
48296 
48297           SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
48298           SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
48299 
48300           return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
48301         }
48302       }
48303     }
48304   }
48305   return SDValue();
48306 }
48307 
48308 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef))), C)
48309 // where C is a mask containing the same number of bits as the setcc and
48310 // where the setcc will freely zero the upper bits of the k-register. We can
48311 // replace the undef in the concat with 0s and remove the AND. This mainly
48312 // helps with v2i1/v4i1 setccs being cast to scalar.
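// For example (sketch): in
//   (and (bitcast (v16i1 (concat_vectors (v4i1 setcc), undef, ...)) to i16), 15)
// only the low 4 bits (from the setcc) survive the AND, so the concat can be
// rebuilt with zero subvectors and the AND dropped entirely.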
48313 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
48314                                              const X86Subtarget &Subtarget) {
48315   assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
48316 
48317   EVT VT = N->getValueType(0);
48318 
48319   // Make sure this is an AND with constant. We will check the value of the
48320   // constant later.
48321   auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
48322   if (!C1)
48323     return SDValue();
48324 
48325   // This is implied by the ConstantSDNode.
48326   assert(!VT.isVector() && "Expected scalar VT!");
48327 
48328   SDValue Src = N->getOperand(0);
48329   if (!Src.hasOneUse())
48330     return SDValue();
48331 
48332   // (Optionally) peek through any_extend().
48333   if (Src.getOpcode() == ISD::ANY_EXTEND) {
48334     if (!Src.getOperand(0).hasOneUse())
48335       return SDValue();
48336     Src = Src.getOperand(0);
48337   }
48338 
48339   if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
48340     return SDValue();
48341 
48342   Src = Src.getOperand(0);
48343   EVT SrcVT = Src.getValueType();
48344 
48345   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48346   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
48347       !TLI.isTypeLegal(SrcVT))
48348     return SDValue();
48349 
48350   if (Src.getOpcode() != ISD::CONCAT_VECTORS)
48351     return SDValue();
48352 
48353   // We only care about the first subvector of the concat; we expect the
48354   // other subvectors to be ignored due to the AND if we make the change.
48355   SDValue SubVec = Src.getOperand(0);
48356   EVT SubVecVT = SubVec.getValueType();
48357 
48358   // The RHS of the AND should be a mask with as many bits as SubVec.
48359   if (!TLI.isTypeLegal(SubVecVT) ||
48360       !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
48361     return SDValue();
48362 
48363   // The first subvector should be a setcc with a legal result type or an
48364   // AND containing at least one setcc with a legal result type.
48365   auto IsLegalSetCC = [&](SDValue V) {
48366     if (V.getOpcode() != ISD::SETCC)
48367       return false;
48368     EVT SetccVT = V.getOperand(0).getValueType();
48369     if (!TLI.isTypeLegal(SetccVT) ||
48370         !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
48371       return false;
48372     if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
48373       return false;
48374     return true;
48375   };
48376   if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
48377                                  (IsLegalSetCC(SubVec.getOperand(0)) ||
48378                                   IsLegalSetCC(SubVec.getOperand(1))))))
48379     return SDValue();
48380 
48381   // We passed all the checks. Rebuild the concat_vectors with zeroes
48382   // and cast it back to VT.
48383   SDLoc dl(N);
48384   SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
48385                               DAG.getConstant(0, dl, SubVecVT));
48386   Ops[0] = SubVec;
48387   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
48388                                Ops);
48389   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
48390   return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
48391 }
48392 
48393 static SDValue getBMIMatchingOp(unsigned Opc, SelectionDAG &DAG,
48394                                 SDValue OpMustEq, SDValue Op, unsigned Depth) {
48395   // We don't want to go crazy with the recursion here. This isn't a super
48396   // important optimization.
48397   static constexpr unsigned kMaxDepth = 2;
48398 
48399   // Only do this re-ordering if op has one use.
48400   if (!Op.hasOneUse())
48401     return SDValue();
48402 
48403   SDLoc DL(Op);
48404   // If we hit another associative op, recurse further.
48405   if (Op.getOpcode() == Opc) {
48406     // Done recursing.
48407     if (Depth++ >= kMaxDepth)
48408       return SDValue();
48409 
48410     for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48411       if (SDValue R =
48412               getBMIMatchingOp(Opc, DAG, OpMustEq, Op.getOperand(OpIdx), Depth))
48413         return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), R,
48414                            Op.getOperand(1 - OpIdx));
48415 
48416   } else if (Op.getOpcode() == ISD::SUB) {
48417     if (Opc == ISD::AND) {
48418       // BLSI: (and x, (sub 0, x))
48419       if (isNullConstant(Op.getOperand(0)) && Op.getOperand(1) == OpMustEq)
48420         return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48421     }
48422     // Opc must be ISD::AND or ISD::XOR
48423     // BLSR: (and x, (sub x, 1))
48424     // BLSMSK: (xor x, (sub x, 1))
48425     if (isOneConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48426       return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48427 
48428   } else if (Op.getOpcode() == ISD::ADD) {
48429     // Opc must be ISD::AND or ISD::XOR
48430     // BLSR: (and x, (add x, -1))
48431     // BLSMSK: (xor x, (add x, -1))
48432     if (isAllOnesConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48433       return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48434   }
48435   return SDValue();
48436 }
48437 
48438 static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
48439                                  const X86Subtarget &Subtarget) {
48440   EVT VT = N->getValueType(0);
48441   // Make sure this node is a candidate for BMI instructions.
48442   if (!Subtarget.hasBMI() || !VT.isScalarInteger() ||
48443       (VT != MVT::i32 && VT != MVT::i64))
48444     return SDValue();
48445 
48446   assert(N->getOpcode() == ISD::AND || N->getOpcode() == ISD::XOR);
48447 
48448   // Try and match LHS and RHS.
48449   for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48450     if (SDValue OpMatch =
48451             getBMIMatchingOp(N->getOpcode(), DAG, N->getOperand(OpIdx),
48452                              N->getOperand(1 - OpIdx), 0))
48453       return OpMatch;
48454   return SDValue();
48455 }
48456 
48457 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
48458                           TargetLowering::DAGCombinerInfo &DCI,
48459                           const X86Subtarget &Subtarget) {
48460   SDValue N0 = N->getOperand(0);
48461   SDValue N1 = N->getOperand(1);
48462   EVT VT = N->getValueType(0);
48463   SDLoc dl(N);
48464   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48465 
48466   // If this is SSE1 only convert to FAND to avoid scalarization.
48467   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
48468     return DAG.getBitcast(MVT::v4i32,
48469                           DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
48470                                       DAG.getBitcast(MVT::v4f32, N0),
48471                                       DAG.getBitcast(MVT::v4f32, N1)));
48472   }
48473 
48474   // Use a 32-bit and+zext if upper bits known zero.
48475   if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
48476     APInt HiMask = APInt::getHighBitsSet(64, 32);
48477     if (DAG.MaskedValueIsZero(N1, HiMask) ||
48478         DAG.MaskedValueIsZero(N0, HiMask)) {
48479       SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
48480       SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
48481       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
48482                          DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
48483     }
48484   }
48485 
48486   // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
48487   // TODO: Support multiple SrcOps.
48488   if (VT == MVT::i1) {
48489     SmallVector<SDValue, 2> SrcOps;
48490     SmallVector<APInt, 2> SrcPartials;
48491     if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
48492         SrcOps.size() == 1) {
48493       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
48494       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
48495       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
48496       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
48497         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
48498       if (Mask) {
48499         assert(SrcPartials[0].getBitWidth() == NumElts &&
48500                "Unexpected partial reduction mask");
48501         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
48502         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
48503         return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
48504       }
48505     }
48506   }
48507 
48508   // InstCombine converts:
48509   //    `(-x << C0) & C1`
48510   // to
48511   //    `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`
48512 // This saves an IR instruction, but on x86 the neg/shift version is
48513 // preferable, so undo the transform.
48514 
48515   if (N0.getOpcode() == ISD::MUL && N0.hasOneUse()) {
48516     // TODO: We don't actually need a splat for this, we just need the checks to
48517     // hold for each element.
48518     ConstantSDNode *N1C = isConstOrConstSplat(N1, /*AllowUndefs*/ true,
48519                                               /*AllowTruncation*/ false);
48520     ConstantSDNode *N01C =
48521         isConstOrConstSplat(N0.getOperand(1), /*AllowUndefs*/ true,
48522                             /*AllowTruncation*/ false);
48523     if (N1C && N01C) {
48524       const APInt &MulC = N01C->getAPIntValue();
48525       const APInt &AndC = N1C->getAPIntValue();
48526       APInt MulCLowBit = MulC & (-MulC);
48527       if (MulC.uge(AndC) && !MulC.isPowerOf2() &&
48528           (MulCLowBit + MulC).isPowerOf2()) {
48529         SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
48530                                   N0.getOperand(0));
48531         int32_t MulCLowBitLog = MulCLowBit.exactLogBase2();
48532         assert(MulCLowBitLog != -1 &&
48533                "Isolated lowbit is somehow not a power of 2!");
48534         SDValue Shift = DAG.getNode(ISD::SHL, dl, VT, Neg,
48535                                     DAG.getConstant(MulCLowBitLog, dl, VT));
48536         return DAG.getNode(ISD::AND, dl, VT, Shift, N1);
48537       }
48538     }
48539   }
48540 
48541   if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
48542     return V;
48543 
48544   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
48545     return R;
48546 
48547   if (SDValue R = combineBitOpWithShift(N, DAG))
48548     return R;
48549 
48550   if (SDValue R = combineBitOpWithPACK(N, DAG))
48551     return R;
48552 
48553   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
48554     return FPLogic;
48555 
48556   if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
48557     return R;
48558 
48559   if (DCI.isBeforeLegalizeOps())
48560     return SDValue();
48561 
48562   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
48563     return R;
48564 
48565   if (SDValue R = combineAndNotIntoANDNP(N, DAG))
48566     return R;
48567 
48568   if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
48569     return ShiftRight;
48570 
48571   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
48572     return R;
48573 
48574   // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
48575   // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
48576   // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
48577   if (VT.isVector() && getTargetConstantFromNode(N1)) {
48578     unsigned Opc0 = N0.getOpcode();
48579     if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
48580         getTargetConstantFromNode(N0.getOperand(1)) &&
48581         DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
48582         N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
48583       SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
48584       return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
48585     }
48586   }
48587 
48588   // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant;
48589   // this avoids a slow variable shift (moving the shift amount to ECX etc.).
48590   if (isOneConstant(N1) && N0->hasOneUse()) {
48591     SDValue Src = N0;
48592     while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
48593             Src.getOpcode() == ISD::TRUNCATE) &&
48594            Src.getOperand(0)->hasOneUse())
48595       Src = Src.getOperand(0);
48596     bool ContainsNOT = false;
48597     X86::CondCode X86CC = X86::COND_B;
48598     // Peek through AND(NOT(SRL(X,Y)),1).
48599     if (isBitwiseNot(Src)) {
48600       Src = Src.getOperand(0);
48601       X86CC = X86::COND_AE;
48602       ContainsNOT = true;
48603     }
48604     if (Src.getOpcode() == ISD::SRL &&
48605         !isa<ConstantSDNode>(Src.getOperand(1))) {
48606       SDValue BitNo = Src.getOperand(1);
48607       Src = Src.getOperand(0);
48608       // Peek through AND(SRL(NOT(X),Y),1).
48609       if (isBitwiseNot(Src)) {
48610         Src = Src.getOperand(0);
48611         X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
48612         ContainsNOT = true;
48613       }
48614       // If we have BMI2 then SHRX should be faster for i32/i64 cases.
48615       if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
48616         if (SDValue BT = getBT(Src, BitNo, dl, DAG))
48617           return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
48618     }
48619   }
48620 
48621   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
48622     // Attempt to recursively combine a bitmask AND with shuffles.
48623     SDValue Op(N, 0);
48624     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48625       return Res;
48626 
48627     // If either operand is a constant mask, then only the elements that aren't
48628     // zero are actually demanded by the other operand.
48629     auto GetDemandedMasks = [&](SDValue Op) {
48630       APInt UndefElts;
48631       SmallVector<APInt> EltBits;
48632       int NumElts = VT.getVectorNumElements();
48633       int EltSizeInBits = VT.getScalarSizeInBits();
48634       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
48635       APInt DemandedElts = APInt::getAllOnes(NumElts);
48636       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
48637                                         EltBits)) {
48638         DemandedBits.clearAllBits();
48639         DemandedElts.clearAllBits();
48640         for (int I = 0; I != NumElts; ++I) {
48641           if (UndefElts[I]) {
48642             // We can't assume an undef src element gives an undef dst - the
48643             // other src might be zero.
48644             DemandedBits.setAllBits();
48645             DemandedElts.setBit(I);
48646           } else if (!EltBits[I].isZero()) {
48647             DemandedBits |= EltBits[I];
48648             DemandedElts.setBit(I);
48649           }
48650         }
48651       }
48652       return std::make_pair(DemandedBits, DemandedElts);
48653     };
48654     APInt Bits0, Elts0;
48655     APInt Bits1, Elts1;
48656     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
48657     std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
48658 
48659     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
48660         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
48661         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
48662         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
48663       if (N->getOpcode() != ISD::DELETED_NODE)
48664         DCI.AddToWorklist(N);
48665       return SDValue(N, 0);
48666     }
48667 
48668     SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
48669     SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
48670     if (NewN0 || NewN1)
48671       return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
48672                          NewN1 ? NewN1 : N1);
48673   }
48674 
48675   // Attempt to combine a scalar bitmask AND with an extracted shuffle.
48676   if ((VT.getScalarSizeInBits() % 8) == 0 &&
48677       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
48678       isa<ConstantSDNode>(N0.getOperand(1)) && N0->hasOneUse()) {
48679     SDValue BitMask = N1;
48680     SDValue SrcVec = N0.getOperand(0);
48681     EVT SrcVecVT = SrcVec.getValueType();
48682 
48683     // Check that the constant bitmask masks whole bytes.
48684     APInt UndefElts;
48685     SmallVector<APInt, 64> EltBits;
48686     if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
48687         getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
48688         llvm::all_of(EltBits, [](const APInt &M) {
48689           return M.isZero() || M.isAllOnes();
48690         })) {
48691       unsigned NumElts = SrcVecVT.getVectorNumElements();
48692       unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
48693       unsigned Idx = N0.getConstantOperandVal(1);
48694 
48695       // Create a root shuffle mask from the byte mask and the extracted index.
48696       SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
48697       for (unsigned i = 0; i != Scale; ++i) {
48698         if (UndefElts[i])
48699           continue;
48700         int VecIdx = Scale * Idx + i;
48701         ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
48702       }
48703 
48704       if (SDValue Shuffle = combineX86ShufflesRecursively(
48705               {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
48706               X86::MaxShuffleCombineDepth,
48707               /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
48708               /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
48709         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
48710                            N0.getOperand(1));
48711     }
48712   }
48713 
48714   if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
48715     return R;
48716 
48717   return SDValue();
48718 }
48719 
48720 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
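// This works because ANDNP(C,Y) computes (~C) & Y, so the inverted constant
// mask ~C no longer needs to be materialized. With AVX512 the whole
// expression is instead emitted as a single VPTERNLOG with immediate 0xCA
// (A ? B : C).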
48721 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
48722                                      const X86Subtarget &Subtarget) {
48723   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48724 
48725   MVT VT = N->getSimpleValueType(0);
48726   unsigned EltSizeInBits = VT.getScalarSizeInBits();
48727   if (!VT.isVector() || (EltSizeInBits % 8) != 0)
48728     return SDValue();
48729 
48730   SDValue N0 = peekThroughBitcasts(N->getOperand(0));
48731   SDValue N1 = peekThroughBitcasts(N->getOperand(1));
48732   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
48733     return SDValue();
48734 
48735   // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
48736   // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
48737   if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
48738         !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
48739     return SDValue();
48740 
48741   // Attempt to extract constant byte masks.
48742   APInt UndefElts0, UndefElts1;
48743   SmallVector<APInt, 32> EltBits0, EltBits1;
48744   if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
48745                                      false, false))
48746     return SDValue();
48747   if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
48748                                      false, false))
48749     return SDValue();
48750 
48751   for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
48752     // TODO - add UNDEF elts support.
48753     if (UndefElts0[i] || UndefElts1[i])
48754       return SDValue();
48755     if (EltBits0[i] != ~EltBits1[i])
48756       return SDValue();
48757   }
48758 
48759   SDLoc DL(N);
48760 
48761   if (useVPTERNLOG(Subtarget, VT)) {
48762     // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
48763     // VPTERNLOG is only available for vXi32/vXi64 types.
48764     MVT OpSVT = EltSizeInBits <= 32 ? MVT::i32 : MVT::i64;
48765     MVT OpVT =
48766         MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
48767     SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
48768     SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
48769     SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
48770     SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
48771     SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
48772                                 DAG, Subtarget);
48773     return DAG.getBitcast(VT, Res);
48774   }
48775 
48776   SDValue X = N->getOperand(0);
48777   SDValue Y =
48778       DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
48779                   DAG.getBitcast(VT, N1.getOperand(0)));
48780   return DAG.getNode(ISD::OR, DL, VT, X, Y);
48781 }
48782 
48783 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
48784 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
48785   if (N->getOpcode() != ISD::OR)
48786     return false;
48787 
48788   SDValue N0 = N->getOperand(0);
48789   SDValue N1 = N->getOperand(1);
48790 
48791   // Canonicalize AND to LHS.
48792   if (N1.getOpcode() == ISD::AND)
48793     std::swap(N0, N1);
48794 
48795   // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
48796   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
48797     return false;
48798 
48799   Mask = N1.getOperand(0);
48800   X = N1.getOperand(1);
48801 
48802   // Check to see if the mask appeared in both the AND and ANDNP.
48803   if (N0.getOperand(0) == Mask)
48804     Y = N0.getOperand(1);
48805   else if (N0.getOperand(1) == Mask)
48806     Y = N0.getOperand(0);
48807   else
48808     return false;
48809 
48810   // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
48811   // ANDNP combine allows other combines to happen that prevent this matching.
48812   return true;
48813 }
48814 
48815 // Try to fold:
48816 //   (or (and (m, y), (pandn m, x)))
48817 // into:
48818 //   (vselect m, x, y)
48819 // As a special case, try to fold:
48820 //   (or (and (m, (sub 0, x)), (pandn m, x)))
48821 // into:
48822 //   (sub (xor X, M), M)
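// The special case holds bitwise per lane because M is known to be all-ones
// or all-zeros: if M == -1 then (X ^ -1) - (-1) == ~X + 1 == -X, and if
// M == 0 then (X ^ 0) - 0 == X, i.e. a conditional negate.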
48823 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
48824                                             const X86Subtarget &Subtarget) {
48825   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48826 
48827   EVT VT = N->getValueType(0);
48828   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
48829         (VT.is256BitVector() && Subtarget.hasInt256())))
48830     return SDValue();
48831 
48832   SDValue X, Y, Mask;
48833   if (!matchLogicBlend(N, X, Y, Mask))
48834     return SDValue();
48835 
48836   // Validate that X, Y, and Mask are bitcasts, and see through them.
48837   Mask = peekThroughBitcasts(Mask);
48838   X = peekThroughBitcasts(X);
48839   Y = peekThroughBitcasts(Y);
48840 
48841   EVT MaskVT = Mask.getValueType();
48842   unsigned EltBits = MaskVT.getScalarSizeInBits();
48843 
48844   // TODO: Attempt to handle floating point cases as well?
48845   if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
48846     return SDValue();
48847 
48848   SDLoc DL(N);
48849 
48850   // Attempt to combine to conditional negate: (sub (xor X, M), M)
48851   if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
48852                                                            DAG, Subtarget))
48853     return Res;
48854 
48855   // PBLENDVB is only available on SSE 4.1.
48856   if (!Subtarget.hasSSE41())
48857     return SDValue();
48858 
48859   // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
48860   if (Subtarget.hasVLX())
48861     return SDValue();
48862 
48863   MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
48864 
48865   X = DAG.getBitcast(BlendVT, X);
48866   Y = DAG.getBitcast(BlendVT, Y);
48867   Mask = DAG.getBitcast(BlendVT, Mask);
48868   Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
48869   return DAG.getBitcast(VT, Mask);
48870 }
48871 
48872 // Helper function for combineOrCmpEqZeroToCtlzSrl
48873 // Transforms:
48874 //   seteq(cmp x, 0)
48875 //   into:
48876 //   srl(ctlz x), log2(bitsize(x))
48877 // Input pattern is checked by caller.
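// This works because ctlz(x) equals the bit width only when x == 0 and is
// strictly smaller otherwise, so shifting ctlz(x) right by log2(bitsize(x))
// yields exactly the 0/1 value of (x == 0).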
48878 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
48879   SDValue Cmp = Op.getOperand(1);
48880   EVT VT = Cmp.getOperand(0).getValueType();
48881   unsigned Log2b = Log2_32(VT.getSizeInBits());
48882   SDLoc dl(Op);
48883   SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
48884   // The result of the shift is true or false, and on X86, the 32-bit
48885   // encoding of shr and lzcnt is more desirable.
48886   SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
48887   SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
48888                             DAG.getConstant(Log2b, dl, MVT::i8));
48889   return Scc;
48890 }
48891 
48892 // Try to transform:
48893 //   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
48894 //   into:
48895 //   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
48896 // Will also attempt to match more generic cases, eg:
48897 //   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
48898 // Only applies if the target supports the FastLZCNT feature.
48899 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
48900                                            TargetLowering::DAGCombinerInfo &DCI,
48901                                            const X86Subtarget &Subtarget) {
48902   if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
48903     return SDValue();
48904 
48905   auto isORCandidate = [](SDValue N) {
48906     return (N->getOpcode() == ISD::OR && N->hasOneUse());
48907   };
48908 
48909   // Check that the zero extend extends to 32 bits or more. The code generated
48910   // by srl(ctlz) for 16-bit or narrower variants of the pattern would require
48911   // extra instructions to clear the upper bits.
48912   if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
48913       !isORCandidate(N->getOperand(0)))
48914     return SDValue();
48915 
48916   // Check the node matches: setcc(eq, cmp 0)
48917   auto isSetCCCandidate = [](SDValue N) {
48918     return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
48919            X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
48920            N->getOperand(1).getOpcode() == X86ISD::CMP &&
48921            isNullConstant(N->getOperand(1).getOperand(1)) &&
48922            N->getOperand(1).getValueType().bitsGE(MVT::i32);
48923   };
48924 
48925   SDNode *OR = N->getOperand(0).getNode();
48926   SDValue LHS = OR->getOperand(0);
48927   SDValue RHS = OR->getOperand(1);
48928 
48929   // Save nodes matching or(or, setcc(eq, cmp 0)).
48930   SmallVector<SDNode *, 2> ORNodes;
48931   while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
48932           (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
48933     ORNodes.push_back(OR);
48934     OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
48935     LHS = OR->getOperand(0);
48936     RHS = OR->getOperand(1);
48937   }
48938 
48939   // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
48940   if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
48941       !isORCandidate(SDValue(OR, 0)))
48942     return SDValue();
48943 
48944   // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
48945   // to
48946   // or(srl(ctlz),srl(ctlz)).
48947   // The dag combiner can then fold it into:
48948   // srl(or(ctlz, ctlz)).
48949   SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
48950   SDValue Ret, NewRHS;
48951   if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
48952     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);
48953 
48954   if (!Ret)
48955     return SDValue();
48956 
48957   // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
48958   while (!ORNodes.empty()) {
48959     OR = ORNodes.pop_back_val();
48960     LHS = OR->getOperand(0);
48961     RHS = OR->getOperand(1);
48962     // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
48963     if (RHS->getOpcode() == ISD::OR)
48964       std::swap(LHS, RHS);
48965     NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
48966     if (!NewRHS)
48967       return SDValue();
48968     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
48969   }
48970 
48971   return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
48972 }
48973 
48974 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
48975                                    SDValue And1_L, SDValue And1_R,
48976                                    const SDLoc &DL, SelectionDAG &DAG) {
48977   if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
48978     return SDValue();
48979   SDValue NotOp = And0_L->getOperand(0);
48980   if (NotOp == And1_R)
48981     std::swap(And1_R, And1_L);
48982   if (NotOp != And1_L)
48983     return SDValue();
48984 
48985   // (~(NotOp) & And0_R) | (NotOp & And1_R)
48986   // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
48987   EVT VT = And1_L->getValueType(0);
48988   SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
48989   SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
48990   SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
48991   SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
48992   return Xor1;
48993 }
48994 
48995 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
48996 /// equivalent `((x ^ y) & m) ^ y` pattern.
48997 /// This is typically a better representation for targets without a fused
48998 /// "and-not" operation. This function is intended to be called from a
48999 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
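/// The rewrite is correct bit-by-bit: where m is 1 the result is
/// ((x ^ y) & 1) ^ y == x, and where m is 0 it is 0 ^ y == y.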
49000 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
49001   // Note that masked-merge variants using XOR or ADD expressions are
49002   // normalized to OR by InstCombine so we only check for OR.
49003   assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
49004   SDValue N0 = Node->getOperand(0);
49005   if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
49006     return SDValue();
49007   SDValue N1 = Node->getOperand(1);
49008   if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
49009     return SDValue();
49010 
49011   SDLoc DL(Node);
49012   SDValue N00 = N0->getOperand(0);
49013   SDValue N01 = N0->getOperand(1);
49014   SDValue N10 = N1->getOperand(0);
49015   SDValue N11 = N1->getOperand(1);
49016   if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
49017     return Result;
49018   if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
49019     return Result;
49020   if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
49021     return Result;
49022   if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
49023     return Result;
49024   return SDValue();
49025 }
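// Worked example (illustration only, not upstream code): the fold above relies
// on the identity (m & x) | (~m & y) == ((x ^ y) & m) ^ y. With m = 0b1100,
// x = 0b1010, y = 0b0110:
//   (m & x) | (~m & y) == 0b1000 | 0b0010 == 0b1010
//   ((x ^ y) & m) ^ y  == (0b1100 & 0b1100) ^ 0b0110 == 0b1010
// i.e. each result bit comes from x where the mask is 1 and from y where it is
// 0, but the rewritten form needs no and-not.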
49026 
49027 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49028 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49029 /// with CMP+{ADC, SBB}.
49030 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
49031 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
49032                                          SDValue X, SDValue Y,
49033                                          SelectionDAG &DAG,
49034                                          bool ZeroSecondOpOnly = false) {
49035   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
49036     return SDValue();
49037 
49038   // Look through a one-use zext.
49039   if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
49040     Y = Y.getOperand(0);
49041 
49042   X86::CondCode CC;
49043   SDValue EFLAGS;
49044   if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
49045     CC = (X86::CondCode)Y.getConstantOperandVal(0);
49046     EFLAGS = Y.getOperand(1);
49047   } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
49048              Y.hasOneUse()) {
49049     EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
49050   }
49051 
49052   if (!EFLAGS)
49053     return SDValue();
49054 
49055   // If X is -1 or 0, then we have an opportunity to avoid constants required in
49056   // the general case below.
49057   auto *ConstantX = dyn_cast<ConstantSDNode>(X);
49058   if (ConstantX && !ZeroSecondOpOnly) {
49059     if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
49060         (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
49061       // This is a complicated way to get -1 or 0 from the carry flag:
49062       // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49063       //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49064       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49065                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49066                          EFLAGS);
49067     }
49068 
49069     if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
49070         (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
49071       if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
49072           EFLAGS.getValueType().isInteger() &&
49073           !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49074         // Swap the operands of a SUB, and we have the same pattern as above.
49075         // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
49076         //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
49077         SDValue NewSub = DAG.getNode(
49078             X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49079             EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49080         SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
49081         return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49082                            DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49083                            NewEFLAGS);
49084       }
49085     }
49086   }
49087 
49088   if (CC == X86::COND_B) {
49089     // X + SETB Z --> adc X, 0
49090     // X - SETB Z --> sbb X, 0
49091     return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49092                        DAG.getVTList(VT, MVT::i32), X,
49093                        DAG.getConstant(0, DL, VT), EFLAGS);
49094   }
49095 
49096   if (ZeroSecondOpOnly)
49097     return SDValue();
49098 
49099   if (CC == X86::COND_A) {
49100     // Try to convert COND_A into COND_B in an attempt to facilitate
49101     // materializing "setb reg".
49102     //
49103     // Do not flip "e > c", where "c" is a constant, because the Cmp instruction
49104     // cannot take an immediate as its first operand.
49105     //
49106     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49107         EFLAGS.getValueType().isInteger() &&
49108         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49109       SDValue NewSub =
49110           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49111                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49112       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49113       return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49114                          DAG.getVTList(VT, MVT::i32), X,
49115                          DAG.getConstant(0, DL, VT), NewEFLAGS);
49116     }
49117   }
49118 
49119   if (CC == X86::COND_AE) {
49120     // X + SETAE --> sbb X, -1
49121     // X - SETAE --> adc X, -1
49122     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49123                        DAG.getVTList(VT, MVT::i32), X,
49124                        DAG.getConstant(-1, DL, VT), EFLAGS);
49125   }
49126 
49127   if (CC == X86::COND_BE) {
49128     // X + SETBE --> sbb X, -1
49129     // X - SETBE --> adc X, -1
49130     // Try to convert COND_BE into COND_AE in an attempt to facilitate
49131     // materializing "setae reg".
49132     //
49133     // Do not flip "e <= c", where "c" is a constant, because the Cmp instruction
49134     // cannot take an immediate as its first operand.
49135     //
49136     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49137         EFLAGS.getValueType().isInteger() &&
49138         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49139       SDValue NewSub =
49140           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49141                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49142       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49143       return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49144                          DAG.getVTList(VT, MVT::i32), X,
49145                          DAG.getConstant(-1, DL, VT), NewEFLAGS);
49146     }
49147   }
49148 
49149   if (CC != X86::COND_E && CC != X86::COND_NE)
49150     return SDValue();
49151 
49152   if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
49153       !X86::isZeroNode(EFLAGS.getOperand(1)) ||
49154       !EFLAGS.getOperand(0).getValueType().isInteger())
49155     return SDValue();
49156 
49157   SDValue Z = EFLAGS.getOperand(0);
49158   EVT ZVT = Z.getValueType();
49159 
49160   // If X is -1 or 0, then we have an opportunity to avoid constants required in
49161   // the general case below.
49162   if (ConstantX) {
49163     // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
49164     // fake operands:
49165     //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
49166     // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
49167     if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
49168         (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
49169       SDValue Zero = DAG.getConstant(0, DL, ZVT);
49170       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49171       SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
49172       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49173                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49174                          SDValue(Neg.getNode(), 1));
49175     }
49176 
49177     // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
49178     // with fake operands:
49179     //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
49180     // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
49181     if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
49182         (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
49183       SDValue One = DAG.getConstant(1, DL, ZVT);
49184       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49185       SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49186       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49187                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49188                          Cmp1.getValue(1));
49189     }
49190   }
49191 
49192   // (cmp Z, 1) sets the carry flag if Z is 0.
49193   SDValue One = DAG.getConstant(1, DL, ZVT);
49194   SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49195   SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49196 
49197   // Add the flags type for ADC/SBB nodes.
49198   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
49199 
49200   // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
49201   // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
49202   if (CC == X86::COND_NE)
49203     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
49204                        DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
49205 
49206   // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
49207   // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
49208   return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
49209                      DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
49210 }
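// Worked example (illustration only): for "X - (Z == 0)" the combine above
// emits (cmp Z, 1), which sets the carry flag exactly when unsigned Z < 1,
// i.e. when Z == 0, followed by "sbb X, 0" == X - 0 - CF. With Z == 0, CF == 1
// and the result is X - 1; with Z == 7, CF == 0 and the result is X, matching
// the original expression in both cases.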
49211 
49212 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49213 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49214 /// with CMP+{ADC, SBB}.
49215 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
49216   bool IsSub = N->getOpcode() == ISD::SUB;
49217   SDValue X = N->getOperand(0);
49218   SDValue Y = N->getOperand(1);
49219   EVT VT = N->getValueType(0);
49220   SDLoc DL(N);
49221 
49222   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
49223     return ADCOrSBB;
49224 
49225   // Commute and try again (negate the result for subtracts).
49226   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
49227     if (IsSub)
49228       ADCOrSBB =
49229           DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
49230     return ADCOrSBB;
49231   }
49232 
49233   return SDValue();
49234 }
49235 
49236 static SDValue combineOrXorWithSETCC(SDNode *N, SDValue N0, SDValue N1,
49237                                      SelectionDAG &DAG) {
49238   assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::OR) &&
49239          "Unexpected opcode");
49240 
49241   // Delegate to combineAddOrSubToADCOrSBB if we have:
49242   //
49243   //   (xor/or (zero_extend (setcc)) imm)
49244   //
49245   // where imm is odd if and only if we have xor, in which case the XOR/OR are
49246   // equivalent to a SUB/ADD, respectively.
49247   if (N0.getOpcode() == ISD::ZERO_EXTEND &&
49248       N0.getOperand(0).getOpcode() == X86ISD::SETCC && N0.hasOneUse()) {
49249     if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
49250       bool IsSub = N->getOpcode() == ISD::XOR;
49251       bool N1COdd = N1C->getZExtValue() & 1;
49252       if (IsSub ? N1COdd : !N1COdd) {
49253         SDLoc DL(N);
49254         EVT VT = N->getValueType(0);
49255         if (SDValue R = combineAddOrSubToADCOrSBB(IsSub, DL, VT, N1, N0, DAG))
49256           return R;
49257       }
49258     }
49259   }
49260 
49261   return SDValue();
49262 }
49263 
49264 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
49265                          TargetLowering::DAGCombinerInfo &DCI,
49266                          const X86Subtarget &Subtarget) {
49267   SDValue N0 = N->getOperand(0);
49268   SDValue N1 = N->getOperand(1);
49269   EVT VT = N->getValueType(0);
49270   SDLoc dl(N);
49271   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49272 
49273   // If this is SSE1 only convert to FOR to avoid scalarization.
49274   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49275     return DAG.getBitcast(MVT::v4i32,
49276                           DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
49277                                       DAG.getBitcast(MVT::v4f32, N0),
49278                                       DAG.getBitcast(MVT::v4f32, N1)));
49279   }
49280 
49281   // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
49282   // TODO: Support multiple SrcOps.
49283   if (VT == MVT::i1) {
49284     SmallVector<SDValue, 2> SrcOps;
49285     SmallVector<APInt, 2> SrcPartials;
49286     if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
49287         SrcOps.size() == 1) {
49288       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49289       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49290       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49291       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49292         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49293       if (Mask) {
49294         assert(SrcPartials[0].getBitWidth() == NumElts &&
49295                "Unexpected partial reduction mask");
49296         SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
49297         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49298         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49299         return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
49300       }
49301     }
49302   }
49303 
49304   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49305     return R;
49306 
49307   if (SDValue R = combineBitOpWithShift(N, DAG))
49308     return R;
49309 
49310   if (SDValue R = combineBitOpWithPACK(N, DAG))
49311     return R;
49312 
49313   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49314     return FPLogic;
49315 
49316   if (DCI.isBeforeLegalizeOps())
49317     return SDValue();
49318 
49319   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49320     return R;
49321 
49322   if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
49323     return R;
49324 
49325   if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
49326     return R;
49327 
49328   // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
49329   if ((VT == MVT::i32 || VT == MVT::i64) &&
49330       N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
49331       isNullConstant(N0.getOperand(0))) {
49332     SDValue Cond = N0.getOperand(1);
49333     if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
49334       Cond = Cond.getOperand(0);
49335 
49336     if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
49337       if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
49338         uint64_t Val = CN->getZExtValue();
49339         if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
49340           X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
49341           CCode = X86::GetOppositeBranchCondition(CCode);
49342           SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
49343 
49344           SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
49345           R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
49346           R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
49347           return R;
49348         }
49349       }
49350     }
49351   }
49352 
49353   // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
49354   // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
49355   // iff the upper elements of the non-shifted arg are zero.
49356   // KUNPCK requires 16+ bool vector elements.
49357   if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
49358     unsigned NumElts = VT.getVectorNumElements();
49359     unsigned HalfElts = NumElts / 2;
49360     APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
49361     if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
49362         N1.getConstantOperandAPInt(1) == HalfElts &&
49363         DAG.MaskedVectorIsZero(N0, UpperElts)) {
49364       return DAG.getNode(
49365           ISD::CONCAT_VECTORS, dl, VT,
49366           extractSubVector(N0, 0, DAG, dl, HalfElts),
49367           extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
49368     }
49369     if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
49370         N0.getConstantOperandAPInt(1) == HalfElts &&
49371         DAG.MaskedVectorIsZero(N1, UpperElts)) {
49372       return DAG.getNode(
49373           ISD::CONCAT_VECTORS, dl, VT,
49374           extractSubVector(N1, 0, DAG, dl, HalfElts),
49375           extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
49376     }
49377   }
49378 
49379   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49380     // Attempt to recursively combine an OR of shuffles.
49381     SDValue Op(N, 0);
49382     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49383       return Res;
49384 
49385     // If either operand is a constant mask, then only the elements that aren't
49386     // allones are actually demanded by the other operand.
49387     auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
49388       APInt UndefElts;
49389       SmallVector<APInt> EltBits;
49390       int NumElts = VT.getVectorNumElements();
49391       int EltSizeInBits = VT.getScalarSizeInBits();
49392       if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
49393         return false;
49394 
49395       APInt DemandedElts = APInt::getZero(NumElts);
49396       for (int I = 0; I != NumElts; ++I)
49397         if (!EltBits[I].isAllOnes())
49398           DemandedElts.setBit(I);
49399 
49400       return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
49401     };
49402     if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
49403       if (N->getOpcode() != ISD::DELETED_NODE)
49404         DCI.AddToWorklist(N);
49405       return SDValue(N, 0);
49406     }
49407   }
49408 
49409   // We should fold "masked merge" patterns when `andn` is not available.
49410   if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
49411     if (SDValue R = foldMaskedMerge(N, DAG))
49412       return R;
49413 
49414   if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
49415     return R;
49416 
49417   return SDValue();
49418 }
49419 
49420 /// Try to turn tests against the signbit in the form of:
49421 ///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
49422 /// into:
49423 ///   SETGT(X, -1)
49424 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
49425   // This is only worth doing if the output type is i8 or i1.
49426   EVT ResultType = N->getValueType(0);
49427   if (ResultType != MVT::i8 && ResultType != MVT::i1)
49428     return SDValue();
49429 
49430   SDValue N0 = N->getOperand(0);
49431   SDValue N1 = N->getOperand(1);
49432 
49433   // We should be performing an xor against a truncated shift.
49434   if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
49435     return SDValue();
49436 
49437   // Make sure we are performing an xor against one.
49438   if (!isOneConstant(N1))
49439     return SDValue();
49440 
49441   // SetCC on x86 zero extends so only act on this if it's a logical shift.
49442   SDValue Shift = N0.getOperand(0);
49443   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
49444     return SDValue();
49445 
49446   // Make sure we are truncating from one of i16, i32 or i64.
49447   EVT ShiftTy = Shift.getValueType();
49448   if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
49449     return SDValue();
49450 
49451   // Make sure the shift amount extracts the sign bit.
49452   if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
49453       Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
49454     return SDValue();
49455 
49456   // Create a greater-than comparison against -1.
49457   // N.B. Using SETGE against 0 works but we want a canonical-looking
49458   // comparison; using SETGT matches up with what TranslateX86CC does.
49459   SDLoc DL(N);
49460   SDValue ShiftOp = Shift.getOperand(0);
49461   EVT ShiftOpTy = ShiftOp.getValueType();
49462   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49463   EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
49464                                                *DAG.getContext(), ResultType);
49465   SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
49466                               DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
49467   if (SetCCResultType != ResultType)
49468     Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
49469   return Cond;
49470 }
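// Illustrative example (not upstream code): for X:i32 == -8, srl(X, 31) == 1,
// so xor(trunc, 1) == 0 and setgt(X, -1) is also 0 (false). For X == 8,
// srl(X, 31) == 0, the xor gives 1, and setgt(8, -1) is 1 (true), so the two
// forms agree.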
49471 
49472 /// Turn vector tests of the signbit in the form of:
49473 ///   xor (sra X, elt_size(X)-1), -1
49474 /// into:
49475 ///   pcmpgt X, -1
49476 ///
49477 /// This should be called before type legalization because the pattern may not
49478 /// persist after that.
49479 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
49480                                          const X86Subtarget &Subtarget) {
49481   EVT VT = N->getValueType(0);
49482   if (!VT.isSimple())
49483     return SDValue();
49484 
49485   switch (VT.getSimpleVT().SimpleTy) {
49486   default: return SDValue();
49487   case MVT::v16i8:
49488   case MVT::v8i16:
49489   case MVT::v4i32:
49490   case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
49491   case MVT::v32i8:
49492   case MVT::v16i16:
49493   case MVT::v8i32:
49494   case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
49495   }
49496 
49497   // There must be a shift right algebraic before the xor, and the xor must be a
49498   // 'not' operation.
49499   SDValue Shift = N->getOperand(0);
49500   SDValue Ones = N->getOperand(1);
49501   if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
49502       !ISD::isBuildVectorAllOnes(Ones.getNode()))
49503     return SDValue();
49504 
49505   // The shift should be smearing the sign bit across each vector element.
49506   auto *ShiftAmt =
49507       isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
49508   if (!ShiftAmt ||
49509       ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
49510     return SDValue();
49511 
49512   // Create a greater-than comparison against -1. We don't use the more obvious
49513   // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
49514   return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
49515 }
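// Illustrative example (not upstream code): per 32-bit lane, if X == -5 then
// sra(X, 31) == -1 and xor with all-ones gives 0, while pcmpgt(-5, -1) is also
// 0 (false). If X == 7, sra(X, 31) == 0, the xor gives all-ones, and
// pcmpgt(7, -1) is all-ones (true), so the signbit test and the compare match.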
49516 
49517 /// Detect patterns of truncation with unsigned saturation:
49518 ///
49519 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
49520 ///   Return the source value x to be truncated or SDValue() if the pattern was
49521 ///   not matched.
49522 ///
49523 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
49524 ///   where C1 >= 0 and C2 is unsigned max of destination type.
49525 ///
49526 ///    (truncate (smax (smin (x, C2), C1)) to dest_type)
49527 ///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
49528 ///
49529 ///   These two patterns are equivalent to:
49530 ///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
49531 ///   So return the smax(x, C1) value to be truncated or SDValue() if the
49532 ///   pattern was not matched.
49533 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49534                                  const SDLoc &DL) {
49535   EVT InVT = In.getValueType();
49536 
49537   // Saturation with truncation. We truncate from InVT to VT.
49538   assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
49539          "Unexpected types for truncate operation");
49540 
49541   // Match min/max and return limit value as a parameter.
49542   auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
49543     if (V.getOpcode() == Opcode &&
49544         ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
49545       return V.getOperand(0);
49546     return SDValue();
49547   };
49548 
49549   APInt C1, C2;
49550   if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
49551     // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
49552     // the element size of the destination type.
49553     if (C2.isMask(VT.getScalarSizeInBits()))
49554       return UMin;
49555 
49556   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
49557     if (MatchMinMax(SMin, ISD::SMAX, C1))
49558       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
49559         return SMin;
49560 
49561   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
49562     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
49563       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
49564           C2.uge(C1)) {
49565         return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
49566       }
49567 
49568   return SDValue();
49569 }
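// Illustrative example (not upstream code): truncating i32 to i8 with unsigned
// saturation matches pattern 1 as trunc(umin(x, 255)); C2 == 255 is the 8-bit
// mask, so x itself is returned as the value to truncate (e.g. x == 300 clamps
// to 255, x == 42 stays 42 after the saturating truncate).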
49570 
49571 /// Detect patterns of truncation with signed saturation:
49572 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
49573 ///                  signed_max_of_dest_type)) to dest_type)
49574 /// or:
49575 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
49576 ///                  signed_min_of_dest_type)) to dest_type).
49577 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
49578 /// Return the source value to be truncated or SDValue() if the pattern was not
49579 /// matched.
49580 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
49581   unsigned NumDstBits = VT.getScalarSizeInBits();
49582   unsigned NumSrcBits = In.getScalarValueSizeInBits();
49583   assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
49584 
49585   auto MatchMinMax = [](SDValue V, unsigned Opcode,
49586                         const APInt &Limit) -> SDValue {
49587     APInt C;
49588     if (V.getOpcode() == Opcode &&
49589         ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
49590       return V.getOperand(0);
49591     return SDValue();
49592   };
49593 
49594   APInt SignedMax, SignedMin;
49595   if (MatchPackUS) {
49596     SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
49597     SignedMin = APInt(NumSrcBits, 0);
49598   } else {
49599     SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
49600     SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
49601   }
49602 
49603   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
49604     if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
49605       return SMax;
49606 
49607   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
49608     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
49609       return SMin;
49610 
49611   return SDValue();
49612 }
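// Illustrative example (not upstream code): truncating i32 to i8 with signed
// saturation matches trunc(smin(smax(x, -128), 127)), e.g. x == 200 clamps to
// 127 and x == -300 clamps to -128. With MatchPackUS the accepted clamp range
// is [0, 255] instead, matching what a PACKUS-style truncate can produce.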
49613 
49614 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
49615                                       SelectionDAG &DAG,
49616                                       const X86Subtarget &Subtarget) {
49617   if (!Subtarget.hasSSE2() || !VT.isVector())
49618     return SDValue();
49619 
49620   EVT SVT = VT.getVectorElementType();
49621   EVT InVT = In.getValueType();
49622   EVT InSVT = InVT.getVectorElementType();
49623 
49624   // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
49625   // split across two registers, we can use a packusdw+perm to clamp to 0-65535
49626   // and concatenate at the same time. Then we can use a final vpmovuswb to
49627   // clip to 0-255.
49628   if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
49629       InVT == MVT::v16i32 && VT == MVT::v16i8) {
49630     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49631       // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
49632       SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
49633                                            DL, DAG, Subtarget);
49634       assert(Mid && "Failed to pack!");
49635       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
49636     }
49637   }
49638 
49639   // vXi32 truncate instructions are available with AVX512F.
49640   // vXi16 truncate instructions are only available with AVX512BW.
49641   // For 256-bit or smaller vectors, we require VLX.
49642   // FIXME: We could widen truncates to 512 to remove the VLX restriction.
49643   // If the result type is 256 bits or larger and we have disabled 512-bit
49644   // registers, we should go ahead and use the pack instructions if possible.
49645   bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
49646                        (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
49647                       (InVT.getSizeInBits() > 128) &&
49648                       (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
49649                       !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
49650 
49651   if (!PreferAVX512 && VT.getVectorNumElements() > 1 &&
49652       isPowerOf2_32(VT.getVectorNumElements()) &&
49653       (SVT == MVT::i8 || SVT == MVT::i16) &&
49654       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
49655     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49656       // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
49657       if (SVT == MVT::i8 && InSVT == MVT::i32) {
49658         EVT MidVT = VT.changeVectorElementType(MVT::i16);
49659         SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
49660                                              DAG, Subtarget);
49661         assert(Mid && "Failed to pack!");
49662         SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
49663                                            Subtarget);
49664         assert(V && "Failed to pack!");
49665         return V;
49666       } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
49667         return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
49668                                       Subtarget);
49669     }
49670     if (SDValue SSatVal = detectSSatPattern(In, VT))
49671       return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
49672                                     Subtarget);
49673   }
49674 
49675   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49676   if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
49677       Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
49678       (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
49679     unsigned TruncOpc = 0;
49680     SDValue SatVal;
49681     if (SDValue SSatVal = detectSSatPattern(In, VT)) {
49682       SatVal = SSatVal;
49683       TruncOpc = X86ISD::VTRUNCS;
49684     } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
49685       SatVal = USatVal;
49686       TruncOpc = X86ISD::VTRUNCUS;
49687     }
49688     if (SatVal) {
49689       unsigned ResElts = VT.getVectorNumElements();
49690       // If the input type is less than 512 bits and we don't have VLX, we need
49691       // to widen to 512 bits.
49692       if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
49693         unsigned NumConcats = 512 / InVT.getSizeInBits();
49694         ResElts *= NumConcats;
49695         SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
49696         ConcatOps[0] = SatVal;
49697         InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
49698                                 NumConcats * InVT.getVectorNumElements());
49699         SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
49700       }
49701       // Widen the result if it's narrower than 128 bits.
49702       if (ResElts * SVT.getSizeInBits() < 128)
49703         ResElts = 128 / SVT.getSizeInBits();
49704       EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
49705       SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
49706       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49707                          DAG.getIntPtrConstant(0, DL));
49708     }
49709   }
49710 
49711   return SDValue();
49712 }
49713 
49714 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
49715 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
49716 /// ISD::AVGCEILU (AVG) instruction.
49717 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49718                                 const X86Subtarget &Subtarget,
49719                                 const SDLoc &DL) {
49720   if (!VT.isVector())
49721     return SDValue();
49722   EVT InVT = In.getValueType();
49723   unsigned NumElems = VT.getVectorNumElements();
49724 
49725   EVT ScalarVT = VT.getVectorElementType();
49726   if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
49727     return SDValue();
49728 
49729   // InScalarVT is the intermediate type in the AVG pattern and it should be greater
49730   // than the original input type (i8/i16).
49731   EVT InScalarVT = InVT.getVectorElementType();
49732   if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
49733     return SDValue();
49734 
49735   if (!Subtarget.hasSSE2())
49736     return SDValue();
49737 
49738   // Detect the following pattern:
49739   //
49740   //   %1 = zext <N x i8> %a to <N x i32>
49741   //   %2 = zext <N x i8> %b to <N x i32>
49742   //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
49743   //   %4 = add nuw nsw <N x i32> %3, %2
49744   //   %5 = lshr <N x i32> %4, <i32 1 x N>
49745   //   %6 = trunc <N x i32> %5 to <N x i8>
49746   //
49747   // In AVX512, the last instruction can also be a trunc store.
49748   if (In.getOpcode() != ISD::SRL)
49749     return SDValue();
49750 
49751   // A lambda checking that the given SDValue is a constant vector and each element
49752   // is in the range [Min, Max].
49753   auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
49754     return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
49755       return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
49756     });
49757   };
49758 
49759   auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
49760     unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
49761     return MaxActiveBits <= ScalarVT.getSizeInBits();
49762   };
49763 
49764   // Check if each element of the vector is right-shifted by one.
49765   SDValue LHS = In.getOperand(0);
49766   SDValue RHS = In.getOperand(1);
49767   if (!IsConstVectorInRange(RHS, 1, 1))
49768     return SDValue();
49769   if (LHS.getOpcode() != ISD::ADD)
49770     return SDValue();
49771 
49772   // Detect a pattern of a + b + 1 where the order doesn't matter.
49773   SDValue Operands[3];
49774   Operands[0] = LHS.getOperand(0);
49775   Operands[1] = LHS.getOperand(1);
49776 
49777   auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
49778                        ArrayRef<SDValue> Ops) {
49779     return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
49780   };
49781 
49782   auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
49783     for (SDValue &Op : Ops)
49784       if (Op.getValueType() != VT)
49785         Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
49786     // Pad to a power-of-2 vector, split+apply and extract the original vector.
49787     unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
49788     EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
49789     if (NumElemsPow2 != NumElems) {
49790       for (SDValue &Op : Ops) {
49791         SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
49792         for (unsigned i = 0; i != NumElems; ++i) {
49793           SDValue Idx = DAG.getIntPtrConstant(i, DL);
49794           EltsOfOp[i] =
49795               DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
49796         }
49797         Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
49798       }
49799     }
49800     SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
49801     if (NumElemsPow2 == NumElems)
49802       return Res;
49803     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49804                        DAG.getIntPtrConstant(0, DL));
49805   };
49806 
49807   // Take care of the case when one of the operands is a constant vector whose
49808   // element is in the range [1, 256].
49809   if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
49810       IsZExtLike(Operands[0])) {
49811     // The pattern is detected. Subtract one from the constant vector, then
49812     // demote it and emit the ISD::AVGCEILU instruction.
49813     SDValue VecOnes = DAG.getConstant(1, DL, InVT);
49814     Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
49815     return AVGSplitter({Operands[0], Operands[1]});
49816   }
49817 
49818   // Matches 'add-like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
49819   // Match the or case only if it's 'add-like' - can be replaced by an add.
49820   auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
49821     if (ISD::ADD == V.getOpcode()) {
49822       Op0 = V.getOperand(0);
49823       Op1 = V.getOperand(1);
49824       return true;
49825     }
49826     if (ISD::ZERO_EXTEND != V.getOpcode())
49827       return false;
49828     V = V.getOperand(0);
49829     if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
49830         !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
49831       return false;
49832     Op0 = V.getOperand(0);
49833     Op1 = V.getOperand(1);
49834     return true;
49835   };
49836 
49837   SDValue Op0, Op1;
49838   if (FindAddLike(Operands[0], Op0, Op1))
49839     std::swap(Operands[0], Operands[1]);
49840   else if (!FindAddLike(Operands[1], Op0, Op1))
49841     return SDValue();
49842   Operands[2] = Op0;
49843   Operands[1] = Op1;
49844 
49845   // Now we have three operands of two additions. Check that one of them is a
49846   // constant vector with ones, and the other two can be promoted from i8/i16.
49847   for (SDValue &Op : Operands) {
49848     if (!IsConstVectorInRange(Op, 1, 1))
49849       continue;
49850     std::swap(Op, Operands[2]);
49851 
49852     // Check if Operands[0] and Operands[1] are results of type promotion.
49853     for (int j = 0; j < 2; ++j)
49854       if (Operands[j].getValueType() != VT)
49855         if (!IsZExtLike(Operands[j]))
49856           return SDValue();
49857 
49858     // The pattern is detected, emit the ISD::AVGCEILU instruction(s).
49859     return AVGSplitter({Operands[0], Operands[1]});
49860   }
49861 
49862   return SDValue();
49863 }
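// Worked example (illustration only): with a == 30 and b == 41 as u8, the
// matched IR computes trunc((zext(a) + zext(b) + 1) >> 1) == (30 + 41 + 1) / 2
// == 36, i.e. the average rounded up, which is exactly what a single
// ISD::AVGCEILU node produces.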
49864 
49865 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
49866                            TargetLowering::DAGCombinerInfo &DCI,
49867                            const X86Subtarget &Subtarget) {
49868   LoadSDNode *Ld = cast<LoadSDNode>(N);
49869   EVT RegVT = Ld->getValueType(0);
49870   EVT MemVT = Ld->getMemoryVT();
49871   SDLoc dl(Ld);
49872   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49873 
49874   // For chips with slow 32-byte unaligned loads, break the 32-byte operation
49875   // into two 16-byte operations. Also split non-temporal aligned loads on
49876   // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
49877   ISD::LoadExtType Ext = Ld->getExtensionType();
49878   unsigned Fast;
49879   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
49880       Ext == ISD::NON_EXTLOAD &&
49881       ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
49882         Ld->getAlign() >= Align(16)) ||
49883        (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
49884                                *Ld->getMemOperand(), &Fast) &&
49885         !Fast))) {
49886     unsigned NumElems = RegVT.getVectorNumElements();
49887     if (NumElems < 2)
49888       return SDValue();
49889 
49890     unsigned HalfOffset = 16;
49891     SDValue Ptr1 = Ld->getBasePtr();
49892     SDValue Ptr2 =
49893         DAG.getMemBasePlusOffset(Ptr1, TypeSize::getFixed(HalfOffset), dl);
49894     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
49895                                   NumElems / 2);
49896     SDValue Load1 =
49897         DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
49898                     Ld->getOriginalAlign(),
49899                     Ld->getMemOperand()->getFlags());
49900     SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
49901                                 Ld->getPointerInfo().getWithOffset(HalfOffset),
49902                                 Ld->getOriginalAlign(),
49903                                 Ld->getMemOperand()->getFlags());
49904     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
49905                              Load1.getValue(1), Load2.getValue(1));
49906 
49907     SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
49908     return DCI.CombineTo(N, NewVec, TF, true);
49909   }
49910 
49911   // Bool vector load - attempt to cast to an integer, as we have good
49912   // (vXiY *ext(vXi1 bitcast(iX))) handling.
49913   if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
49914       RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
49915     unsigned NumElts = RegVT.getVectorNumElements();
49916     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49917     if (TLI.isTypeLegal(IntVT)) {
49918       SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
49919                                     Ld->getPointerInfo(),
49920                                     Ld->getOriginalAlign(),
49921                                     Ld->getMemOperand()->getFlags());
49922       SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
49923       return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
49924     }
49925   }
49926 
49927   // If we also load/broadcast this to a wider type, then just extract the
49928   // lowest subvector.
49929   if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
49930       (RegVT.is128BitVector() || RegVT.is256BitVector())) {
49931     SDValue Ptr = Ld->getBasePtr();
49932     SDValue Chain = Ld->getChain();
49933     for (SDNode *User : Chain->uses()) {
49934       if (User != N &&
49935           (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
49936            User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
49937            ISD::isNormalLoad(User)) &&
49938           cast<MemSDNode>(User)->getChain() == Chain &&
49939           !User->hasAnyUseOfValue(1) &&
49940           User->getValueSizeInBits(0).getFixedValue() >
49941               RegVT.getFixedSizeInBits()) {
49942         if (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
49943             cast<MemSDNode>(User)->getBasePtr() == Ptr &&
49944             cast<MemSDNode>(User)->getMemoryVT().getSizeInBits() ==
49945                 MemVT.getSizeInBits()) {
49946           SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
49947                                              RegVT.getSizeInBits());
49948           Extract = DAG.getBitcast(RegVT, Extract);
49949           return DCI.CombineTo(N, Extract, SDValue(User, 1));
49950         }
49951         auto MatchingBits = [](const APInt &Undefs, const APInt &UserUndefs,
49952                                ArrayRef<APInt> Bits, ArrayRef<APInt> UserBits) {
49953           for (unsigned I = 0, E = Undefs.getBitWidth(); I != E; ++I) {
49954             if (Undefs[I])
49955               continue;
49956             if (UserUndefs[I] || Bits[I] != UserBits[I])
49957               return false;
49958           }
49959           return true;
49960         };
49961         // See if we are loading a constant that matches in the lower
49962         // bits of a longer constant (but from a different constant pool ptr).
49963         EVT UserVT = User->getValueType(0);
49964         SDValue UserPtr = cast<MemSDNode>(User)->getBasePtr();
49965         const Constant *LdC = getTargetConstantFromBasePtr(Ptr);
49966         const Constant *UserC = getTargetConstantFromBasePtr(UserPtr);
49967         if (LdC && UserC && UserPtr != Ptr) {
49968           unsigned LdSize = LdC->getType()->getPrimitiveSizeInBits();
49969           unsigned UserSize = UserC->getType()->getPrimitiveSizeInBits();
49970           if (LdSize < UserSize || !ISD::isNormalLoad(User)) {
49971             APInt Undefs, UserUndefs;
49972             SmallVector<APInt> Bits, UserBits;
49973             unsigned NumBits = std::min(RegVT.getScalarSizeInBits(),
49974                                         UserVT.getScalarSizeInBits());
49975             if (getTargetConstantBitsFromNode(SDValue(N, 0), NumBits, Undefs,
49976                                               Bits) &&
49977                 getTargetConstantBitsFromNode(SDValue(User, 0), NumBits,
49978                                               UserUndefs, UserBits)) {
49979               if (MatchingBits(Undefs, UserUndefs, Bits, UserBits)) {
49980                 SDValue Extract = extractSubVector(
49981                     SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits());
49982                 Extract = DAG.getBitcast(RegVT, Extract);
49983                 return DCI.CombineTo(N, Extract, SDValue(User, 1));
49984               }
49985             }
49986           }
49987         }
49988       }
49989     }
49990   }
49991 
49992   // Cast ptr32 and ptr64 pointers to the default address space before a load.
49993   unsigned AddrSpace = Ld->getAddressSpace();
49994   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
49995       AddrSpace == X86AS::PTR32_UPTR) {
49996     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
49997     if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
49998       SDValue Cast =
49999           DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
50000       return DAG.getExtLoad(Ext, dl, RegVT, Ld->getChain(), Cast,
50001                             Ld->getPointerInfo(), MemVT, Ld->getOriginalAlign(),
50002                             Ld->getMemOperand()->getFlags());
50003     }
50004   }
50005 
50006   return SDValue();
50007 }
50008 
50009 /// If V is a build vector of boolean constants and exactly one of those
50010 /// constants is true, return the operand index of that true element.
50011 /// Otherwise, return -1.
50012 static int getOneTrueElt(SDValue V) {
50013   // This needs to be a build vector of booleans.
50014   // TODO: Checking for the i1 type matches the IR definition for the mask,
50015   // but the mask check could be loosened to i8 or other types. That might
50016   // also require checking more than 'allOnesValue'; eg, the x86 HW
50017   // instructions only require that the MSB is set for each mask element.
50018   // The ISD::MSTORE comments/definition do not specify how the mask operand
50019   // is formatted.
50020   auto *BV = dyn_cast<BuildVectorSDNode>(V);
50021   if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
50022     return -1;
50023 
50024   int TrueIndex = -1;
50025   unsigned NumElts = BV->getValueType(0).getVectorNumElements();
50026   for (unsigned i = 0; i < NumElts; ++i) {
50027     const SDValue &Op = BV->getOperand(i);
50028     if (Op.isUndef())
50029       continue;
50030     auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
50031     if (!ConstNode)
50032       return -1;
50033     if (ConstNode->getAPIntValue().countr_one() >= 1) {
50034       // If we already found a one, this is too many.
50035       if (TrueIndex >= 0)
50036         return -1;
50037       TrueIndex = i;
50038     }
50039   }
50040   return TrueIndex;
50041 }
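// Illustrative example (not upstream code): for a v4i1 build vector with
// operands (0, 0, 1, 0) this returns 2; for (0, 1, 1, 0) it returns -1 because
// more than one element is true; undef elements are skipped rather than
// treated as true.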
50042 
50043 /// Given a masked memory load/store operation, return true if it has one mask
50044 /// bit set. If it has one mask bit set, then also return the memory address of
50045 /// the scalar element to load/store, the vector index to insert/extract that
50046 /// scalar element, and the alignment for the scalar memory access.
50047 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
50048                                          SelectionDAG &DAG, SDValue &Addr,
50049                                          SDValue &Index, Align &Alignment,
50050                                          unsigned &Offset) {
50051   int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
50052   if (TrueMaskElt < 0)
50053     return false;
50054 
50055   // Get the address of the one scalar element that is specified by the mask
50056   // using the appropriate offset from the base pointer.
50057   EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
50058   Offset = 0;
50059   Addr = MaskedOp->getBasePtr();
50060   if (TrueMaskElt != 0) {
50061     Offset = TrueMaskElt * EltVT.getStoreSize();
50062     Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::getFixed(Offset),
50063                                     SDLoc(MaskedOp));
50064   }
50065 
50066   Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
50067   Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
50068                               EltVT.getStoreSize());
50069   return true;
50070 }
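// Illustrative example (not upstream code): for a masked v4f32 load/store with
// mask (0, 0, 1, 0) and base pointer P, the element type stores 4 bytes, so
// Offset == 8, Addr == P + 8, Index == 2, and Alignment is the original
// alignment re-derived for a 4-byte access via commonAlignment().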
50071 
50072 /// If exactly one element of the mask is set for a non-extending masked load,
50073 /// it is a scalar load and vector insert.
50074 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50075 /// mask have already been optimized in IR, so we don't bother with those here.
50076 static SDValue
50077 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50078                              TargetLowering::DAGCombinerInfo &DCI,
50079                              const X86Subtarget &Subtarget) {
50080   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50081   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50082   // However, some target hooks may need to be added to know when the transform
50083   // is profitable. Endianness would also have to be considered.
50084 
50085   SDValue Addr, VecIndex;
50086   Align Alignment;
50087   unsigned Offset;
50088   if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
50089     return SDValue();
50090 
50091   // Load the one scalar element that is specified by the mask using the
50092   // appropriate offset from the base pointer.
50093   SDLoc DL(ML);
50094   EVT VT = ML->getValueType(0);
50095   EVT EltVT = VT.getVectorElementType();
50096 
50097   EVT CastVT = VT;
50098   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50099     EltVT = MVT::f64;
50100     CastVT = VT.changeVectorElementType(EltVT);
50101   }
50102 
50103   SDValue Load =
50104       DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
50105                   ML->getPointerInfo().getWithOffset(Offset),
50106                   Alignment, ML->getMemOperand()->getFlags());
50107 
50108   SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
50109 
50110   // Insert the loaded element into the appropriate place in the vector.
50111   SDValue Insert =
50112       DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
50113   Insert = DAG.getBitcast(VT, Insert);
50114   return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
50115 }
50116 
50117 static SDValue
50118 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50119                               TargetLowering::DAGCombinerInfo &DCI) {
50120   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50121   if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
50122     return SDValue();
50123 
50124   SDLoc DL(ML);
50125   EVT VT = ML->getValueType(0);
50126 
50127   // If we are loading the first and last elements of a vector, it is safe and
50128   // always faster to load the whole vector. Replace the masked load with a
50129   // vector load and select.
50130   unsigned NumElts = VT.getVectorNumElements();
50131   BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
50132   bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
50133   bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
50134   if (LoadFirstElt && LoadLastElt) {
50135     SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
50136                                 ML->getMemOperand());
50137     SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
50138                                   ML->getPassThru());
50139     return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
50140   }
50141 
50142   // Convert a masked load with a constant mask into a masked load and a select.
50143   // This allows the select operation to use a faster kind of select instruction
50144   // (for example, vblendvps -> vblendps).
50145 
50146   // Don't try this if the pass-through operand is already undefined. That would
50147   // cause an infinite loop because that's what we're about to create.
50148   if (ML->getPassThru().isUndef())
50149     return SDValue();
50150 
50151   if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
50152     return SDValue();
50153 
50154   // The new masked load has an undef pass-through operand. The select uses the
50155   // original pass-through operand.
50156   SDValue NewML = DAG.getMaskedLoad(
50157       VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
50158       DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
50159       ML->getAddressingMode(), ML->getExtensionType());
50160   SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
50161                                 ML->getPassThru());
50162 
50163   return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
50164 }
50165 
50166 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
50167                                  TargetLowering::DAGCombinerInfo &DCI,
50168                                  const X86Subtarget &Subtarget) {
50169   auto *Mld = cast<MaskedLoadSDNode>(N);
50170 
50171   // TODO: Expanding load with constant mask may be optimized as well.
50172   if (Mld->isExpandingLoad())
50173     return SDValue();
50174 
50175   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
50176     if (SDValue ScalarLoad =
50177             reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
50178       return ScalarLoad;
50179 
50180     // TODO: Do some AVX512 subsets benefit from this transform?
50181     if (!Subtarget.hasAVX512())
50182       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
50183         return Blend;
50184   }
50185 
50186   // If the mask value has been legalized to a non-boolean vector, try to
50187   // simplify ops leading up to it. We only demand the MSB of each lane.
50188   SDValue Mask = Mld->getMask();
50189   if (Mask.getScalarValueSizeInBits() != 1) {
50190     EVT VT = Mld->getValueType(0);
50191     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50192     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50193     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50194       if (N->getOpcode() != ISD::DELETED_NODE)
50195         DCI.AddToWorklist(N);
50196       return SDValue(N, 0);
50197     }
50198     if (SDValue NewMask =
50199             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50200       return DAG.getMaskedLoad(
50201           VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
50202           NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
50203           Mld->getAddressingMode(), Mld->getExtensionType());
50204   }
50205 
50206   return SDValue();
50207 }
50208 
50209 /// If exactly one element of the mask is set for a non-truncating masked store,
50210 /// it is a vector extract and scalar store.
50211 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50212 /// mask have already been optimized in IR, so we don't bother with those here.
50213 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
50214                                               SelectionDAG &DAG,
50215                                               const X86Subtarget &Subtarget) {
50216   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50217   // However, some target hooks may need to be added to know when the transform
50218   // is profitable. Endianness would also have to be considered.
50219 
50220   SDValue Addr, VecIndex;
50221   Align Alignment;
50222   unsigned Offset;
50223   if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
50224     return SDValue();
50225 
50226   // Extract the one scalar element that is actually being stored.
50227   SDLoc DL(MS);
50228   SDValue Value = MS->getValue();
50229   EVT VT = Value.getValueType();
50230   EVT EltVT = VT.getVectorElementType();
50231   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50232     EltVT = MVT::f64;
50233     EVT CastVT = VT.changeVectorElementType(EltVT);
50234     Value = DAG.getBitcast(CastVT, Value);
50235   }
50236   SDValue Extract =
50237       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
50238 
50239   // Store that element at the appropriate offset from the base pointer.
50240   return DAG.getStore(MS->getChain(), DL, Extract, Addr,
50241                       MS->getPointerInfo().getWithOffset(Offset),
50242                       Alignment, MS->getMemOperand()->getFlags());
50243 }
50244 
50245 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
50246                                   TargetLowering::DAGCombinerInfo &DCI,
50247                                   const X86Subtarget &Subtarget) {
50248   MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
50249   if (Mst->isCompressingStore())
50250     return SDValue();
50251 
50252   EVT VT = Mst->getValue().getValueType();
50253   SDLoc dl(Mst);
50254   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50255 
50256   if (Mst->isTruncatingStore())
50257     return SDValue();
50258 
50259   if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
50260     return ScalarStore;
50261 
50262   // If the mask value has been legalized to a non-boolean vector, try to
50263   // simplify ops leading up to it. We only demand the MSB of each lane.
50264   SDValue Mask = Mst->getMask();
50265   if (Mask.getScalarValueSizeInBits() != 1) {
50266     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50267     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50268       if (N->getOpcode() != ISD::DELETED_NODE)
50269         DCI.AddToWorklist(N);
50270       return SDValue(N, 0);
50271     }
50272     if (SDValue NewMask =
50273             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50274       return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
50275                                 Mst->getBasePtr(), Mst->getOffset(), NewMask,
50276                                 Mst->getMemoryVT(), Mst->getMemOperand(),
50277                                 Mst->getAddressingMode());
50278   }
50279 
50280   SDValue Value = Mst->getValue();
50281   if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
50282       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
50283                             Mst->getMemoryVT())) {
50284     return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
50285                               Mst->getBasePtr(), Mst->getOffset(), Mask,
50286                               Mst->getMemoryVT(), Mst->getMemOperand(),
50287                               Mst->getAddressingMode(), true);
50288   }
50289 
50290   return SDValue();
50291 }
50292 
50293 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
50294                             TargetLowering::DAGCombinerInfo &DCI,
50295                             const X86Subtarget &Subtarget) {
50296   StoreSDNode *St = cast<StoreSDNode>(N);
50297   EVT StVT = St->getMemoryVT();
50298   SDLoc dl(St);
50299   SDValue StoredVal = St->getValue();
50300   EVT VT = StoredVal.getValueType();
50301   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50302 
50303   // Convert a store of vXi1 into a store of iX and a bitcast.
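  // e.g. (schematic) store <16 x i1> %v  -->  store i16 (bitcast %v to i16)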
50304   if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
50305       VT.getVectorElementType() == MVT::i1) {
50306 
50307     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
50308     StoredVal = DAG.getBitcast(NewVT, StoredVal);
50309 
50310     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50311                         St->getPointerInfo(), St->getOriginalAlign(),
50312                         St->getMemOperand()->getFlags());
50313   }
50314 
50315   // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
50316   // This will avoid a copy to k-register.
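  // e.g. (schematic) store <1 x i1> (scalar_to_vector i8 %x)
  //      --> store i8 (%x & 1)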
50317   if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
50318       StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
50319       StoredVal.getOperand(0).getValueType() == MVT::i8) {
50320     SDValue Val = StoredVal.getOperand(0);
50321     // We must store zeros to the unused bits.
50322     Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
50323     return DAG.getStore(St->getChain(), dl, Val,
50324                         St->getBasePtr(), St->getPointerInfo(),
50325                         St->getOriginalAlign(),
50326                         St->getMemOperand()->getFlags());
50327   }
50328 
50329   // Widen v2i1/v4i1 stores to v8i1.
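  // e.g. (schematic) for a v2i1 value %v:
  //   store <2 x i1> %v
  //   --> store <8 x i1> (concat_vectors %v, zeroes, zeroes, zeroes)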
50330   if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
50331       Subtarget.hasAVX512()) {
50332     unsigned NumConcats = 8 / VT.getVectorNumElements();
50333     // We must store zeros to the unused bits.
50334     SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
50335     Ops[0] = StoredVal;
50336     StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
50337     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50338                         St->getPointerInfo(), St->getOriginalAlign(),
50339                         St->getMemOperand()->getFlags());
50340   }
50341 
50342   // Turn vXi1 stores of constants into a scalar store.
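  // e.g. (schematic, assuming element 0 maps to the lowest bit):
  //   store <8 x i1> <1,0,1,1,0,0,0,1>  -->  store i8 0x8D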
50343   if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
50344        VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
50345       ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
50346     // If it's a v64i1 store without 64-bit support, we need two stores.
50347     if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
50348       SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
50349                                       StoredVal->ops().slice(0, 32));
50350       Lo = combinevXi1ConstantToInteger(Lo, DAG);
50351       SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
50352                                       StoredVal->ops().slice(32, 32));
50353       Hi = combinevXi1ConstantToInteger(Hi, DAG);
50354 
50355       SDValue Ptr0 = St->getBasePtr();
50356       SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(4), dl);
50357 
50358       SDValue Ch0 =
50359           DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
50360                        St->getOriginalAlign(),
50361                        St->getMemOperand()->getFlags());
50362       SDValue Ch1 =
50363           DAG.getStore(St->getChain(), dl, Hi, Ptr1,
50364                        St->getPointerInfo().getWithOffset(4),
50365                        St->getOriginalAlign(),
50366                        St->getMemOperand()->getFlags());
50367       return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
50368     }
50369 
50370     StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
50371     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50372                         St->getPointerInfo(), St->getOriginalAlign(),
50373                         St->getMemOperand()->getFlags());
50374   }
50375 
50376   // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
50377   // Sandy Bridge, perform two 16-byte stores.
50378   unsigned Fast;
50379   if (VT.is256BitVector() && StVT == VT &&
50380       TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
50381                              *St->getMemOperand(), &Fast) &&
50382       !Fast) {
50383     unsigned NumElems = VT.getVectorNumElements();
50384     if (NumElems < 2)
50385       return SDValue();
50386 
50387     return splitVectorStore(St, DAG);
50388   }
50389 
50390   // Split under-aligned vector non-temporal stores.
50391   if (St->isNonTemporal() && StVT == VT &&
50392       St->getAlign().value() < VT.getStoreSize()) {
50393     // ZMM/YMM nt-stores - either it can be stored as a series of shorter
50394     // vectors or the legalizer can scalarize it to use MOVNTI.
50395     if (VT.is256BitVector() || VT.is512BitVector()) {
50396       unsigned NumElems = VT.getVectorNumElements();
50397       if (NumElems < 2)
50398         return SDValue();
50399       return splitVectorStore(St, DAG);
50400     }
50401 
50402     // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
50403     // to use MOVNTI.
50404     if (VT.is128BitVector() && Subtarget.hasSSE2()) {
50405       MVT NTVT = Subtarget.hasSSE4A()
50406                      ? MVT::v2f64
50407                      : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
50408       return scalarizeVectorStore(St, NTVT, DAG);
50409     }
50410   }
50411 
50412   // Try to optimize v16i16->v16i8 truncating stores when BWI is not
50413   // supported but AVX512F is, by extending to v16i32 and truncating.
50414   if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
50415       St->getValue().getOpcode() == ISD::TRUNCATE &&
50416       St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
50417       TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
50418       St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
50419     SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
50420                               St->getValue().getOperand(0));
50421     return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
50422                              MVT::v16i8, St->getMemOperand());
50423   }
50424 
50425   // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
50426   if (!St->isTruncatingStore() &&
50427       (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
50428        StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
50429       StoredVal.hasOneUse() &&
50430       TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
50431     bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
50432     return EmitTruncSStore(IsSigned, St->getChain(),
50433                            dl, StoredVal.getOperand(0), St->getBasePtr(),
50434                            VT, St->getMemOperand(), DAG);
50435   }
50436 
50437   // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
50438   if (!St->isTruncatingStore()) {
50439     auto IsExtractedElement = [](SDValue V) {
50440       if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
50441         V = V.getOperand(0);
50442       unsigned Opc = V.getOpcode();
50443       if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
50444           isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
50445           V.getOperand(0).hasOneUse())
50446         return V.getOperand(0);
50447       return SDValue();
50448     };
50449     if (SDValue Extract = IsExtractedElement(StoredVal)) {
50450       SDValue Trunc = peekThroughOneUseBitcasts(Extract);
50451       if (Trunc.getOpcode() == X86ISD::VTRUNC) {
50452         SDValue Src = Trunc.getOperand(0);
50453         MVT DstVT = Trunc.getSimpleValueType();
50454         MVT SrcVT = Src.getSimpleValueType();
50455         unsigned NumSrcElts = SrcVT.getVectorNumElements();
50456         unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
50457         MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
50458         if (NumTruncBits == VT.getSizeInBits() &&
50459             TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
50460           return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
50461                                    TruncVT, St->getMemOperand());
50462         }
50463       }
50464     }
50465   }
50466 
50467   // Optimize trunc store (of multiple scalars) to shuffle and store.
50468   // First, pack all of the elements in one place. Next, store to memory
50469   // in fewer chunks.
50470   if (St->isTruncatingStore() && VT.isVector()) {
50471     // Check if we can detect an AVG pattern from the truncation. If yes,
50472     // replace the trunc store by a normal store with the result of X86ISD::AVG
50473     // instruction.
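    // e.g. (schematic) a truncating store of
    //   (lshr (add (add (zext %a), (zext %b)), 1), 1)
    // becomes a plain store of (X86ISD::AVG %a, %b).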
50474     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
50475       if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
50476                                          Subtarget, dl))
50477         return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
50478                             St->getPointerInfo(), St->getOriginalAlign(),
50479                             St->getMemOperand()->getFlags());
50480 
50481     if (TLI.isTruncStoreLegal(VT, StVT)) {
50482       if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
50483         return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
50484                                dl, Val, St->getBasePtr(),
50485                                St->getMemoryVT(), St->getMemOperand(), DAG);
50486       if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
50487                                           DAG, dl))
50488         return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
50489                                dl, Val, St->getBasePtr(),
50490                                St->getMemoryVT(), St->getMemOperand(), DAG);
50491     }
50492 
50493     return SDValue();
50494   }
50495 
50496   // Cast ptr32 and ptr64 pointers to the default address space before a store.
50497   unsigned AddrSpace = St->getAddressSpace();
50498   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50499       AddrSpace == X86AS::PTR32_UPTR) {
50500     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50501     if (PtrVT != St->getBasePtr().getSimpleValueType()) {
50502       SDValue Cast =
50503           DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
50504       return DAG.getTruncStore(
50505           St->getChain(), dl, StoredVal, Cast, St->getPointerInfo(), StVT,
50506           St->getOriginalAlign(), St->getMemOperand()->getFlags(),
50507           St->getAAInfo());
50508     }
50509   }
50510 
50511   // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
50512   // the FP state in cases where an emms may be missing.
50513   // A preferable solution to the general problem is to figure out the right
50514   // places to insert EMMS.  This qualifies as a quick hack.
50515 
50516   // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
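  // e.g. (schematic, 32-bit target with SSE2):
  //   (store i64 (load i64 %p), %q)  -->  (store f64 (load f64 %p), %q)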
50517   if (VT.getSizeInBits() != 64)
50518     return SDValue();
50519 
50520   const Function &F = DAG.getMachineFunction().getFunction();
50521   bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
50522   bool F64IsLegal =
50523       !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
50524 
50525   if (!F64IsLegal || Subtarget.is64Bit())
50526     return SDValue();
50527 
50528   if (VT == MVT::i64 && isa<LoadSDNode>(St->getValue()) &&
50529       cast<LoadSDNode>(St->getValue())->isSimple() &&
50530       St->getChain().hasOneUse() && St->isSimple()) {
50531     auto *Ld = cast<LoadSDNode>(St->getValue());
50532 
50533     if (!ISD::isNormalLoad(Ld))
50534       return SDValue();
50535 
50536     // Avoid the transformation if there are multiple uses of the loaded value.
50537     if (!Ld->hasNUsesOfValue(1, 0))
50538       return SDValue();
50539 
50540     SDLoc LdDL(Ld);
50541     SDLoc StDL(N);
50542     // Lower to a single movq load/store pair.
50543     SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
50544                                 Ld->getBasePtr(), Ld->getMemOperand());
50545 
50546     // Make sure new load is placed in same chain order.
50547     // Make sure the new load is placed in the same chain order.
50548     return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
50549                         St->getMemOperand());
50550   }
50551 
50552   // This is similar to the above case, but here we handle a scalar 64-bit
50553   // integer store that is extracted from a vector on a 32-bit target.
50554   // If we have SSE2, then we can treat it like a floating-point double
50555   // to get past legalization. The execution dependencies fixup pass will
50556   // choose the optimal machine instruction for the store if this really is
50557   // an integer or v2f32 rather than an f64.
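  // e.g. (schematic):
  //   store i64 (extract_vector_elt (v2i64 %v), %i)
  //   --> store f64 (extract_vector_elt (bitcast %v to v2f64), %i)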
50558   if (VT == MVT::i64 &&
50559       St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
50560     SDValue OldExtract = St->getOperand(1);
50561     SDValue ExtOp0 = OldExtract.getOperand(0);
50562     unsigned VecSize = ExtOp0.getValueSizeInBits();
50563     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
50564     SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
50565     SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
50566                                      BitCast, OldExtract.getOperand(1));
50567     return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
50568                         St->getPointerInfo(), St->getOriginalAlign(),
50569                         St->getMemOperand()->getFlags());
50570   }
50571 
50572   return SDValue();
50573 }
50574 
50575 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
50576                                      TargetLowering::DAGCombinerInfo &DCI,
50577                                      const X86Subtarget &Subtarget) {
50578   auto *St = cast<MemIntrinsicSDNode>(N);
50579 
50580   SDValue StoredVal = N->getOperand(1);
50581   MVT VT = StoredVal.getSimpleValueType();
50582   EVT MemVT = St->getMemoryVT();
50583 
50584   // Figure out which elements we demand.
50585   unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
50586   APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
50587 
50588   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50589   if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
50590     if (N->getOpcode() != ISD::DELETED_NODE)
50591       DCI.AddToWorklist(N);
50592     return SDValue(N, 0);
50593   }
50594 
50595   return SDValue();
50596 }
50597 
50598 /// Return 'true' if this vector operation is "horizontal"
50599 /// and return the operands for the horizontal operation in LHS and RHS.  A
50600 /// horizontal operation performs the binary operation on successive elements
50601 /// of its first operand, then on successive elements of its second operand,
50602 /// returning the resulting values in a vector.  For example, if
50603 ///   A = < float a0, float a1, float a2, float a3 >
50604 /// and
50605 ///   B = < float b0, float b1, float b2, float b3 >
50606 /// then the result of doing a horizontal operation on A and B is
50607 ///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
50608 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
50609 /// A horizontal-op B, for some already available A and B, and if so then LHS is
50610 /// set to A, RHS to B, and the routine returns 'true'.
50611 static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
50612                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
50613                               bool IsCommutative,
50614                               SmallVectorImpl<int> &PostShuffleMask) {
50615   // If either operand is undef, bail out. The binop should be simplified.
50616   if (LHS.isUndef() || RHS.isUndef())
50617     return false;
50618 
50619   // Look for the following pattern:
50620   //   A = < float a0, float a1, float a2, float a3 >
50621   //   B = < float b0, float b1, float b2, float b3 >
50622   // and
50623   //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
50624   //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
50625   // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
50626   // which is A horizontal-op B.
50627 
50628   MVT VT = LHS.getSimpleValueType();
50629   assert((VT.is128BitVector() || VT.is256BitVector()) &&
50630          "Unsupported vector type for horizontal add/sub");
50631   unsigned NumElts = VT.getVectorNumElements();
50632 
50633   auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
50634                         SmallVectorImpl<int> &ShuffleMask) {
50635     bool UseSubVector = false;
50636     if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
50637         Op.getOperand(0).getValueType().is256BitVector() &&
50638         llvm::isNullConstant(Op.getOperand(1))) {
50639       Op = Op.getOperand(0);
50640       UseSubVector = true;
50641     }
50642     SmallVector<SDValue, 2> SrcOps;
50643     SmallVector<int, 16> SrcMask, ScaledMask;
50644     SDValue BC = peekThroughBitcasts(Op);
50645     if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
50646         !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
50647           return Op.getValueSizeInBits() == BC.getValueSizeInBits();
50648         })) {
50649       resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
50650       if (!UseSubVector && SrcOps.size() <= 2 &&
50651           scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
50652         N0 = !SrcOps.empty() ? SrcOps[0] : SDValue();
50653         N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
50654         ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
50655       }
50656       if (UseSubVector && SrcOps.size() == 1 &&
50657           scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
50658         std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
50659         ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
50660         ShuffleMask.assign(Mask.begin(), Mask.end());
50661       }
50662     }
50663   };
50664 
50665   // View LHS in the form
50666   //   LHS = VECTOR_SHUFFLE A, B, LMask
50667   // If LHS is not a shuffle, then pretend it is the identity shuffle:
50668   //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
50669   // NOTE: A default initialized SDValue represents an UNDEF of type VT.
50670   SDValue A, B;
50671   SmallVector<int, 16> LMask;
50672   GetShuffle(LHS, A, B, LMask);
50673 
50674   // Likewise, view RHS in the form
50675   //   RHS = VECTOR_SHUFFLE C, D, RMask
50676   SDValue C, D;
50677   SmallVector<int, 16> RMask;
50678   GetShuffle(RHS, C, D, RMask);
50679 
50680   // At least one of the operands should be a vector shuffle.
50681   unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
50682   if (NumShuffles == 0)
50683     return false;
50684 
50685   if (LMask.empty()) {
50686     A = LHS;
50687     for (unsigned i = 0; i != NumElts; ++i)
50688       LMask.push_back(i);
50689   }
50690 
50691   if (RMask.empty()) {
50692     C = RHS;
50693     for (unsigned i = 0; i != NumElts; ++i)
50694       RMask.push_back(i);
50695   }
50696 
50697   // If we have a unary mask, ensure the other op is set to null.
50698   if (isUndefOrInRange(LMask, 0, NumElts))
50699     B = SDValue();
50700   else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
50701     A = SDValue();
50702 
50703   if (isUndefOrInRange(RMask, 0, NumElts))
50704     D = SDValue();
50705   else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
50706     C = SDValue();
50707 
50708   // If A and B occur in reverse order in RHS, then canonicalize by commuting
50709   // RHS operands and shuffle mask.
50710   if (A != C) {
50711     std::swap(C, D);
50712     ShuffleVectorSDNode::commuteMask(RMask);
50713   }
50714   // Check that the shuffles are both shuffling the same vectors.
50715   if (!(A == C && B == D))
50716     return false;
50717 
50718   PostShuffleMask.clear();
50719   PostShuffleMask.append(NumElts, SM_SentinelUndef);
50720 
50721   // LHS and RHS are now:
50722   //   LHS = shuffle A, B, LMask
50723   //   RHS = shuffle A, B, RMask
50724   // Check that the masks correspond to performing a horizontal operation.
50725   // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
50726   // so we just repeat the inner loop if this is a 256-bit op.
50727   unsigned Num128BitChunks = VT.getSizeInBits() / 128;
50728   unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
50729   unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
50730   assert((NumEltsPer128BitChunk % 2 == 0) &&
50731          "Vector type should have an even number of elements in each lane");
50732   for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
50733     for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
50734       // Ignore undefined components.
50735       int LIdx = LMask[i + j], RIdx = RMask[i + j];
50736       if (LIdx < 0 || RIdx < 0 ||
50737           (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
50738           (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
50739         continue;
50740 
50741       // Check that successive odd/even elements are being operated on. If not,
50742       // this is not a horizontal operation.
50743       if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
50744           !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
50745         return false;
50746 
50747       // Compute the post-shuffle mask index based on where the element
50748       // is stored in the HOP result, and where it needs to be moved to.
50749       int Base = LIdx & ~1u;
50750       int Index = ((Base % NumEltsPer128BitChunk) / 2) +
50751                   ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
50752 
50753       // The  low half of the 128-bit result must choose from A.
50754       // The high half of the 128-bit result must choose from B,
50755       // unless B is undef. In that case, we are always choosing from A.
50756       if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
50757         Index += NumEltsPer64BitChunk;
50758       PostShuffleMask[i + j] = Index;
50759     }
50760   }
50761 
50762   SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
50763   SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
50764 
50765   bool IsIdentityPostShuffle =
50766       isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
50767   if (IsIdentityPostShuffle)
50768     PostShuffleMask.clear();
50769 
50770   // Avoid 128-bit multi lane shuffles if pre-AVX2 and FP (integer will split).
50771   if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
50772       isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
50773     return false;
50774 
50775   // If the source nodes are already used in HorizOps then always accept this.
50776   // Shuffle folding should merge these back together.
50777   bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
50778     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50779   });
50780   bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
50781     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50782   });
50783   bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
50784 
50785   // Assume a SingleSource HOP if we only shuffle one input and don't need to
50786   // shuffle the result.
50787   if (!ForceHorizOp &&
50788       !shouldUseHorizontalOp(NewLHS == NewRHS &&
50789                                  (NumShuffles < 2 || !IsIdentityPostShuffle),
50790                              DAG, Subtarget))
50791     return false;
50792 
50793   LHS = DAG.getBitcast(VT, NewLHS);
50794   RHS = DAG.getBitcast(VT, NewRHS);
50795   return true;
50796 }
50797 
50798 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
50799 static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
50800                                          const X86Subtarget &Subtarget) {
50801   EVT VT = N->getValueType(0);
50802   unsigned Opcode = N->getOpcode();
50803   bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
50804   SmallVector<int, 8> PostShuffleMask;
50805 
50806   switch (Opcode) {
50807   case ISD::FADD:
50808   case ISD::FSUB:
50809     if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
50810         (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
50811       SDValue LHS = N->getOperand(0);
50812       SDValue RHS = N->getOperand(1);
50813       auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
50814       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50815                             PostShuffleMask)) {
50816         SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
50817         if (!PostShuffleMask.empty())
50818           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50819                                             DAG.getUNDEF(VT), PostShuffleMask);
50820         return HorizBinOp;
50821       }
50822     }
50823     break;
50824   case ISD::ADD:
50825   case ISD::SUB:
50826     if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
50827                                  VT == MVT::v16i16 || VT == MVT::v8i32)) {
50828       SDValue LHS = N->getOperand(0);
50829       SDValue RHS = N->getOperand(1);
50830       auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
50831       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50832                             PostShuffleMask)) {
50833         auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
50834                                         ArrayRef<SDValue> Ops) {
50835           return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
50836         };
50837         SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
50838                                               {LHS, RHS}, HOpBuilder);
50839         if (!PostShuffleMask.empty())
50840           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50841                                             DAG.getUNDEF(VT), PostShuffleMask);
50842         return HorizBinOp;
50843       }
50844     }
50845     break;
50846   }
50847 
50848   return SDValue();
50849 }
50850 
50851 //  Try to combine the following nodes
50852 //  t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
50853 //    <i32 -2147483648[float -0.000000e+00]> 0
50854 //  t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
50855 //    <(load 4 from constant-pool)> t0, t29
50856 //  [t30: v16i32 = bitcast t27]
50857 //  t6: v16i32 = xor t7, t27[t30]
50858 //  t11: v16f32 = bitcast t6
50859 //  t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
50860 //  into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
50861 //  t22: v16f32 = bitcast t7
50862 //  t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
50863 //  t24: v32f16 = bitcast t23
50864 static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
50865                                   const X86Subtarget &Subtarget) {
50866   EVT VT = N->getValueType(0);
50867   SDValue LHS = N->getOperand(0);
50868   SDValue RHS = N->getOperand(1);
50869   int CombineOpcode =
50870       N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
50871   auto combineConjugation = [&](SDValue &r) {
50872     if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
50873       SDValue XOR = LHS.getOperand(0);
50874       if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
50875         KnownBits XORRHS = DAG.computeKnownBits(XOR.getOperand(1));
50876         if (XORRHS.isConstant()) {
50877           APInt ConjugationInt32 = APInt(32, 0x80000000, true);
50878           APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
50879           if ((XORRHS.getBitWidth() == 32 &&
50880                XORRHS.getConstant() == ConjugationInt32) ||
50881               (XORRHS.getBitWidth() == 64 &&
50882                XORRHS.getConstant() == ConjugationInt64)) {
50883             SelectionDAG::FlagInserter FlagsInserter(DAG, N);
50884             SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
50885             SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
50886             r = DAG.getBitcast(VT, FCMulC);
50887             return true;
50888           }
50889         }
50890       }
50891     }
50892     return false;
50893   };
50894   SDValue Res;
50895   if (combineConjugation(Res))
50896     return Res;
50897   std::swap(LHS, RHS);
50898   if (combineConjugation(Res))
50899     return Res;
50900   return Res;
50901 }
50902 
50903 //  Try to combine the following nodes:
50904 //  FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
50905 static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
50906                                 const X86Subtarget &Subtarget) {
50907   auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
50908     return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
50909            Flags.hasAllowContract();
50910   };
50911 
50912   auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
50913     return DAG.getTarget().Options.NoSignedZerosFPMath ||
50914            Flags.hasNoSignedZeros();
50915   };
50916   auto IsVectorAllNegativeZero = [&DAG](SDValue Op) {
50917     APInt AI = APInt(32, 0x80008000, true);
50918     KnownBits Bits = DAG.computeKnownBits(Op);
50919     return Bits.getBitWidth() == 32 && Bits.isConstant() &&
50920            Bits.getConstant() == AI;
50921   };
50922 
50923   if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
50924       !AllowContract(N->getFlags()))
50925     return SDValue();
50926 
50927   EVT VT = N->getValueType(0);
50928   if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
50929     return SDValue();
50930 
50931   SDValue LHS = N->getOperand(0);
50932   SDValue RHS = N->getOperand(1);
50933   bool IsConj;
50934   SDValue FAddOp1, MulOp0, MulOp1;
50935   auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
50936                        &IsVectorAllNegativeZero,
50937                        &HasNoSignedZero](SDValue N) -> bool {
50938     if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
50939       return false;
50940     SDValue Op0 = N.getOperand(0);
50941     unsigned Opcode = Op0.getOpcode();
50942     if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
50943       if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
50944         MulOp0 = Op0.getOperand(0);
50945         MulOp1 = Op0.getOperand(1);
50946         IsConj = Opcode == X86ISD::VFCMULC;
50947         return true;
50948       }
50949       if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
50950           ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
50951             HasNoSignedZero(Op0->getFlags())) ||
50952            IsVectorAllNegativeZero(Op0->getOperand(2)))) {
50953         MulOp0 = Op0.getOperand(0);
50954         MulOp1 = Op0.getOperand(1);
50955         IsConj = Opcode == X86ISD::VFCMADDC;
50956         return true;
50957       }
50958     }
50959     return false;
50960   };
50961 
50962   if (GetCFmulFrom(LHS))
50963     FAddOp1 = RHS;
50964   else if (GetCFmulFrom(RHS))
50965     FAddOp1 = LHS;
50966   else
50967     return SDValue();
50968 
50969   MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
50970   FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
50971   unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
50972   // FIXME: How do we handle when fast math flags of FADD are different from
50973   // CFMUL's?
50974   SDValue CFmul =
50975       DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
50976   return DAG.getBitcast(VT, CFmul);
50977 }
50978 
50979 /// Do target-specific dag combines on floating-point adds/subs.
50980 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
50981                                const X86Subtarget &Subtarget) {
50982   if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
50983     return HOp;
50984 
50985   if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
50986     return COp;
50987 
50988   return SDValue();
50989 }
50990 
50991 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
50992 /// the codegen.
50993 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
50994 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
50995 ///       anything that is guaranteed to be transformed by DAGCombiner.
50996 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
50997                                           const X86Subtarget &Subtarget,
50998                                           const SDLoc &DL) {
50999   assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
51000   SDValue Src = N->getOperand(0);
51001   unsigned SrcOpcode = Src.getOpcode();
51002   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51003 
51004   EVT VT = N->getValueType(0);
51005   EVT SrcVT = Src.getValueType();
51006 
51007   auto IsFreeTruncation = [VT](SDValue Op) {
51008     unsigned TruncSizeInBits = VT.getScalarSizeInBits();
51009 
51010     // See if this has been extended from a smaller/equal size to
51011     // the truncation size, allowing a truncation to combine with the extend.
51012     unsigned Opcode = Op.getOpcode();
51013     if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
51014          Opcode == ISD::ZERO_EXTEND) &&
51015         Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
51016       return true;
51017 
51018     // See if this is a single use constant which can be constant folded.
51019     // NOTE: We don't peek through bitcasts here because there is currently
51020     // no support for constant folding truncate+bitcast+vector_of_constants. So
51021     // we'll just end up with a truncate on both operands which will
51022     // get turned back into (truncate (binop)) causing an infinite loop.
51023     return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
51024   };
51025 
51026   auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
51027     SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
51028     SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
51029     return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
51030   };
51031 
51032   // Don't combine if the operation has other uses.
51033   if (!Src.hasOneUse())
51034     return SDValue();
51035 
51036   // Only support vector truncation for now.
51037   // TODO: i64 scalar math would benefit as well.
51038   if (!VT.isVector())
51039     return SDValue();
51040 
51041   // In most cases it's only worth pre-truncating if we're only facing the cost
51042   // of one truncation.
51043   // i.e. if one of the inputs will constant fold or the input is repeated.
51044   switch (SrcOpcode) {
51045   case ISD::MUL:
51046     // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
51047     // better to truncate if we have the chance.
51048     if (SrcVT.getScalarType() == MVT::i64 &&
51049         TLI.isOperationLegal(SrcOpcode, VT) &&
51050         !TLI.isOperationLegal(SrcOpcode, SrcVT))
51051       return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
51052     [[fallthrough]];
51053   case ISD::AND:
51054   case ISD::XOR:
51055   case ISD::OR:
51056   case ISD::ADD:
51057   case ISD::SUB: {
51058     SDValue Op0 = Src.getOperand(0);
51059     SDValue Op1 = Src.getOperand(1);
51060     if (TLI.isOperationLegal(SrcOpcode, VT) &&
51061         (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
51062       return TruncateArithmetic(Op0, Op1);
51063     break;
51064   }
51065   }
51066 
51067   return SDValue();
51068 }
51069 
51070 // Try to form a MULHU or MULHS node by looking for
51071 // (trunc (srl (mul ext, ext), 16))
51072 // TODO: This is X86 specific because we want to be able to handle wide types
51073 // before type legalization. But we can only do it if the vector will be
51074 // legalized via widening/splitting. Type legalization can't handle promotion
51075 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
51076 // combiner.
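// For illustration (schematic), with unsigned %a and %b known to fit in 16 bits:
//   (v8i16 (trunc (srl (mul (v8i32 %a), (v8i32 %b)), 16)))
//   --> (mulhu (v8i16 (trunc %a)), (v8i16 (trunc %b)))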
51077 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
51078                             SelectionDAG &DAG, const X86Subtarget &Subtarget) {
51079   // First instruction should be a right shift of a multiply.
51080   if (Src.getOpcode() != ISD::SRL ||
51081       Src.getOperand(0).getOpcode() != ISD::MUL)
51082     return SDValue();
51083 
51084   if (!Subtarget.hasSSE2())
51085     return SDValue();
51086 
51087   // Only handle vXi16 types that are at least 128-bits unless they will be
51088   // widened.
51089   if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
51090     return SDValue();
51091 
51092   // Input type should be at least vXi32.
51093   EVT InVT = Src.getValueType();
51094   if (InVT.getVectorElementType().getSizeInBits() < 32)
51095     return SDValue();
51096 
51097   // Need a shift by 16.
51098   APInt ShiftAmt;
51099   if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
51100       ShiftAmt != 16)
51101     return SDValue();
51102 
51103   SDValue LHS = Src.getOperand(0).getOperand(0);
51104   SDValue RHS = Src.getOperand(0).getOperand(1);
51105 
51106   // Count leading sign/zero bits on both inputs - if there are enough then
51107   // truncation back to vXi16 will be cheap - either as a pack/shuffle
51108   // sequence or using AVX512 truncations. If the inputs are sext/zext then the
51109   // truncations may actually be free by peeking through to the ext source.
51110   auto IsSext = [&DAG](SDValue V) {
51111     return DAG.ComputeMaxSignificantBits(V) <= 16;
51112   };
51113   auto IsZext = [&DAG](SDValue V) {
51114     return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
51115   };
51116 
51117   bool IsSigned = IsSext(LHS) && IsSext(RHS);
51118   bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
51119   if (!IsSigned && !IsUnsigned)
51120     return SDValue();
51121 
51122   // Check if both inputs are extensions, which will be removed by truncation.
51123   bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
51124                          LHS.getOpcode() == ISD::ZERO_EXTEND) &&
51125                         (RHS.getOpcode() == ISD::SIGN_EXTEND ||
51126                          RHS.getOpcode() == ISD::ZERO_EXTEND) &&
51127                         LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
51128                         RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
51129 
51130   // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
51131   // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
51132   // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
51133   // will have to split anyway.
51134   unsigned InSizeInBits = InVT.getSizeInBits();
51135   if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
51136       !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
51137       (InSizeInBits % 16) == 0) {
51138     EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51139                                 InVT.getSizeInBits() / 16);
51140     SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
51141                               DAG.getBitcast(BCVT, RHS));
51142     return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
51143   }
51144 
51145   // Truncate back to source type.
51146   LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
51147   RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
51148 
51149   unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
51150   return DAG.getNode(Opc, DL, VT, LHS, RHS);
51151 }
51152 
51153 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
51154 // from one vector with signed bytes from another vector, adds together
51155 // adjacent pairs of 16-bit products, and saturates the result before
51156 // truncating to 16-bits.
51157 //
51158 // Which looks something like this:
51159 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
51160 //                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
51161 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
51162                                const X86Subtarget &Subtarget,
51163                                const SDLoc &DL) {
51164   if (!VT.isVector() || !Subtarget.hasSSSE3())
51165     return SDValue();
51166 
51167   unsigned NumElems = VT.getVectorNumElements();
51168   EVT ScalarVT = VT.getVectorElementType();
51169   if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
51170     return SDValue();
51171 
51172   SDValue SSatVal = detectSSatPattern(In, VT);
51173   if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
51174     return SDValue();
51175 
51176   // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
51177   // of multiplies from even/odd elements.
51178   SDValue N0 = SSatVal.getOperand(0);
51179   SDValue N1 = SSatVal.getOperand(1);
51180 
51181   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
51182     return SDValue();
51183 
51184   SDValue N00 = N0.getOperand(0);
51185   SDValue N01 = N0.getOperand(1);
51186   SDValue N10 = N1.getOperand(0);
51187   SDValue N11 = N1.getOperand(1);
51188 
51189   // TODO: Handle constant vectors and use knownbits/computenumsignbits?
51190   // Canonicalize zero_extend to LHS.
51191   if (N01.getOpcode() == ISD::ZERO_EXTEND)
51192     std::swap(N00, N01);
51193   if (N11.getOpcode() == ISD::ZERO_EXTEND)
51194     std::swap(N10, N11);
51195 
51196   // Ensure we have a zero_extend and a sign_extend.
51197   if (N00.getOpcode() != ISD::ZERO_EXTEND ||
51198       N01.getOpcode() != ISD::SIGN_EXTEND ||
51199       N10.getOpcode() != ISD::ZERO_EXTEND ||
51200       N11.getOpcode() != ISD::SIGN_EXTEND)
51201     return SDValue();
51202 
51203   // Peek through the extends.
51204   N00 = N00.getOperand(0);
51205   N01 = N01.getOperand(0);
51206   N10 = N10.getOperand(0);
51207   N11 = N11.getOperand(0);
51208 
51209   // Ensure the extend is from vXi8.
51210   if (N00.getValueType().getVectorElementType() != MVT::i8 ||
51211       N01.getValueType().getVectorElementType() != MVT::i8 ||
51212       N10.getValueType().getVectorElementType() != MVT::i8 ||
51213       N11.getValueType().getVectorElementType() != MVT::i8)
51214     return SDValue();
51215 
51216   // All inputs should be build_vectors.
51217   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
51218       N01.getOpcode() != ISD::BUILD_VECTOR ||
51219       N10.getOpcode() != ISD::BUILD_VECTOR ||
51220       N11.getOpcode() != ISD::BUILD_VECTOR)
51221     return SDValue();
51222 
51223   // N00/N10 are zero extended. N01/N11 are sign extended.
51224 
51225   // For each element, we need to ensure we have an odd element from one vector
51226   // multiplied by the odd element of another vector and the even element from
51227   // one of the same vectors being multiplied by the even element from the
51228   // other vector. So we need to make sure for each element i, this operator
51229   // other vector. So we need to make sure for each element i, this operation
51230   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
51231   SDValue ZExtIn, SExtIn;
51232   for (unsigned i = 0; i != NumElems; ++i) {
51233     SDValue N00Elt = N00.getOperand(i);
51234     SDValue N01Elt = N01.getOperand(i);
51235     SDValue N10Elt = N10.getOperand(i);
51236     SDValue N11Elt = N11.getOperand(i);
51237     // TODO: Be more tolerant to undefs.
51238     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51239         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51240         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51241         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
51242       return SDValue();
51243     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
51244     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
51245     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
51246     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
51247     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
51248       return SDValue();
51249     unsigned IdxN00 = ConstN00Elt->getZExtValue();
51250     unsigned IdxN01 = ConstN01Elt->getZExtValue();
51251     unsigned IdxN10 = ConstN10Elt->getZExtValue();
51252     unsigned IdxN11 = ConstN11Elt->getZExtValue();
51253     // Add is commutative so indices can be reordered.
51254     if (IdxN00 > IdxN10) {
51255       std::swap(IdxN00, IdxN10);
51256       std::swap(IdxN01, IdxN11);
51257     }
51258     // N0 indices must be the even element. N1 indices must be the next odd element.
51259     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
51260         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
51261       return SDValue();
51262     SDValue N00In = N00Elt.getOperand(0);
51263     SDValue N01In = N01Elt.getOperand(0);
51264     SDValue N10In = N10Elt.getOperand(0);
51265     SDValue N11In = N11Elt.getOperand(0);
51266     // First time we find an input capture it.
51267     if (!ZExtIn) {
51268       ZExtIn = N00In;
51269       SExtIn = N01In;
51270     }
51271     if (ZExtIn != N00In || SExtIn != N01In ||
51272         ZExtIn != N10In || SExtIn != N11In)
51273       return SDValue();
51274   }
51275 
51276   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
51277                          ArrayRef<SDValue> Ops) {
51278     // Shrink by adding truncate nodes and let DAGCombine fold with the
51279     // sources.
51280     EVT InVT = Ops[0].getValueType();
51281     assert(InVT.getScalarType() == MVT::i8 &&
51282            "Unexpected scalar element type");
51283     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
51284     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51285                                  InVT.getVectorNumElements() / 2);
51286     return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
51287   };
51288   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
51289                           PMADDBuilder);
51290 }
51291 
51292 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
51293                                const X86Subtarget &Subtarget) {
51294   EVT VT = N->getValueType(0);
51295   SDValue Src = N->getOperand(0);
51296   SDLoc DL(N);
51297 
51298   // Attempt to pre-truncate inputs to arithmetic ops instead.
51299   if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
51300     return V;
51301 
51302   // Try to detect AVG pattern first.
51303   if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
51304     return Avg;
51305 
51306   // Try to detect the PMADDUBSW pattern.
51307   if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
51308     return PMAdd;
51309 
51310   // Try to combine truncation with signed/unsigned saturation.
51311   if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
51312     return Val;
51313 
51314   // Try to combine PMULHUW/PMULHW for vXi16.
51315   if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
51316     return V;
51317 
51318   // The bitcast source is a direct mmx result.
51319   // Detect bitcasts between x86mmx and i32.
51320   if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
51321     SDValue BCSrc = Src.getOperand(0);
51322     if (BCSrc.getValueType() == MVT::x86mmx)
51323       return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
51324   }
51325 
51326   return SDValue();
51327 }
51328 
51329 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
51330                              TargetLowering::DAGCombinerInfo &DCI) {
51331   EVT VT = N->getValueType(0);
51332   SDValue In = N->getOperand(0);
51333   SDLoc DL(N);
51334 
51335   if (SDValue SSatVal = detectSSatPattern(In, VT))
51336     return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
51337   if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
51338     return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
51339 
51340   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51341   APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
51342   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51343     return SDValue(N, 0);
51344 
51345   return SDValue();
51346 }
51347 
51348 /// Returns the negated value if the node \p N flips sign of FP value.
51349 ///
51350 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
51351 /// or FSUB(0, x)
51352 /// AVX512F does not have FXOR, so FNEG is lowered as
51353 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
51354 /// In this case we go through all bitcasts.
51355 /// This also recognizes splat of a negated value and returns the splat of that
51356 /// value.
51357 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
51358   if (N->getOpcode() == ISD::FNEG)
51359     return N->getOperand(0);
51360 
51361   // Don't recurse exponentially.
51362   if (Depth > SelectionDAG::MaxRecursionDepth)
51363     return SDValue();
51364 
51365   unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
51366 
51367   SDValue Op = peekThroughBitcasts(SDValue(N, 0));
51368   EVT VT = Op->getValueType(0);
51369 
51370   // Make sure the element size doesn't change.
51371   if (VT.getScalarSizeInBits() != ScalarSize)
51372     return SDValue();
51373 
51374   unsigned Opc = Op.getOpcode();
51375   switch (Opc) {
51376   case ISD::VECTOR_SHUFFLE: {
51377     // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
51378     // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
51379     if (!Op.getOperand(1).isUndef())
51380       return SDValue();
51381     if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
51382       if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
51383         return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
51384                                     cast<ShuffleVectorSDNode>(Op)->getMask());
51385     break;
51386   }
51387   case ISD::INSERT_VECTOR_ELT: {
51388     // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
51389     // -V, INDEX).
51390     SDValue InsVector = Op.getOperand(0);
51391     SDValue InsVal = Op.getOperand(1);
51392     if (!InsVector.isUndef())
51393       return SDValue();
51394     if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
51395       if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
51396         return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
51397                            NegInsVal, Op.getOperand(2));
51398     break;
51399   }
51400   case ISD::FSUB:
51401   case ISD::XOR:
51402   case X86ISD::FXOR: {
51403     SDValue Op1 = Op.getOperand(1);
51404     SDValue Op0 = Op.getOperand(0);
51405 
51406     // For XOR and FXOR, we want to check if constant
51407     // bits of Op1 are sign bit masks. For FSUB, we
51408     // have to check if constant bits of Op0 are sign
51409     // bit masks and hence we swap the operands.
51410     if (Opc == ISD::FSUB)
51411       std::swap(Op0, Op1);
51412 
51413     APInt UndefElts;
51414     SmallVector<APInt, 16> EltBits;
51415     // Extract constant bits and see if they are all
51416     // sign bit masks. Ignore the undef elements.
51417     if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
51418                                       /* AllowWholeUndefs */ true,
51419                                       /* AllowPartialUndefs */ false)) {
51420       for (unsigned I = 0, E = EltBits.size(); I < E; I++)
51421         if (!UndefElts[I] && !EltBits[I].isSignMask())
51422           return SDValue();
51423 
51424       // Only allow bitcast from correctly-sized constant.
51425       Op0 = peekThroughBitcasts(Op0);
51426       if (Op0.getScalarValueSizeInBits() == ScalarSize)
51427         return Op0;
51428     }
51429     break;
51430   } // case
51431   } // switch
51432 
51433   return SDValue();
51434 }
51435 
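/// Given an FMA-family opcode, return the opcode obtained after negating the
/// multiplication result (\p NegMul), the accumulator (\p NegAcc), and/or the
/// final result (\p NegRes). For example, negating only the accumulator turns
/// ISD::FMA (a*b+c) into X86ISD::FMSUB (a*b-c).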
51436 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
51437                                 bool NegRes) {
51438   if (NegMul) {
51439     switch (Opcode) {
51440     default: llvm_unreachable("Unexpected opcode");
51441     case ISD::FMA:              Opcode = X86ISD::FNMADD;        break;
51442     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FNMADD; break;
51443     case X86ISD::FMADD_RND:     Opcode = X86ISD::FNMADD_RND;    break;
51444     case X86ISD::FMSUB:         Opcode = X86ISD::FNMSUB;        break;
51445     case X86ISD::STRICT_FMSUB:  Opcode = X86ISD::STRICT_FNMSUB; break;
51446     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FNMSUB_RND;    break;
51447     case X86ISD::FNMADD:        Opcode = ISD::FMA;              break;
51448     case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA;       break;
51449     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FMADD_RND;     break;
51450     case X86ISD::FNMSUB:        Opcode = X86ISD::FMSUB;         break;
51451     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB;  break;
51452     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FMSUB_RND;     break;
51453     }
51454   }
51455 
51456   if (NegAcc) {
51457     switch (Opcode) {
51458     default: llvm_unreachable("Unexpected opcode");
51459     case ISD::FMA:              Opcode = X86ISD::FMSUB;         break;
51460     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FMSUB;  break;
51461     case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;     break;
51462     case X86ISD::FMSUB:         Opcode = ISD::FMA;              break;
51463     case X86ISD::STRICT_FMSUB:  Opcode = ISD::STRICT_FMA;       break;
51464     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;     break;
51465     case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;        break;
51466     case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
51467     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;    break;
51468     case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;        break;
51469     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
51470     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;    break;
51471     case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;      break;
51472     case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND;  break;
51473     case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;      break;
51474     case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND;  break;
51475     }
51476   }
51477 
51478   if (NegRes) {
51479     switch (Opcode) {
51480     // For accuracy reasons, we never combine fneg and fma under strict FP.
51481     default: llvm_unreachable("Unexpected opcode");
51482     case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
51483     case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
51484     case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
51485     case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
51486     case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
51487     case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
51488     case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
51489     case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
51490     }
51491   }
51492 
51493   return Opcode;
51494 }
51495 
51496 /// Do target-specific dag combines on floating point negations.
51497 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
51498                            TargetLowering::DAGCombinerInfo &DCI,
51499                            const X86Subtarget &Subtarget) {
51500   EVT OrigVT = N->getValueType(0);
51501   SDValue Arg = isFNEG(DAG, N);
51502   if (!Arg)
51503     return SDValue();
51504 
51505   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51506   EVT VT = Arg.getValueType();
51507   EVT SVT = VT.getScalarType();
51508   SDLoc DL(N);
51509 
51510   // Let legalize expand this if it isn't a legal type yet.
51511   if (!TLI.isTypeLegal(VT))
51512     return SDValue();
51513 
51514   // If we're negating a FMUL node on a target with FMA, then we can avoid the
51515   // use of a constant by performing (-0 - A*B) instead.
51516   // FIXME: Check rounding control flags as well once it becomes available.
51517   if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
51518       Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
51519     SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
51520     SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
51521                                   Arg.getOperand(1), Zero);
51522     return DAG.getBitcast(OrigVT, NewNode);
51523   }
51524 
51525   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
51526   bool LegalOperations = !DCI.isBeforeLegalizeOps();
51527   if (SDValue NegArg =
51528           TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
51529     return DAG.getBitcast(OrigVT, NegArg);
51530 
51531   return SDValue();
51532 }
51533 
51534 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
51535                                                 bool LegalOperations,
51536                                                 bool ForCodeSize,
51537                                                 NegatibleCost &Cost,
51538                                                 unsigned Depth) const {
51539   // fneg patterns are removable even if they have multiple uses.
51540   if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
51541     Cost = NegatibleCost::Cheaper;
51542     return DAG.getBitcast(Op.getValueType(), Arg);
51543   }
51544 
51545   EVT VT = Op.getValueType();
51546   EVT SVT = VT.getScalarType();
51547   unsigned Opc = Op.getOpcode();
51548   SDNodeFlags Flags = Op.getNode()->getFlags();
51549   switch (Opc) {
51550   case ISD::FMA:
51551   case X86ISD::FMSUB:
51552   case X86ISD::FNMADD:
51553   case X86ISD::FNMSUB:
51554   case X86ISD::FMADD_RND:
51555   case X86ISD::FMSUB_RND:
51556   case X86ISD::FNMADD_RND:
51557   case X86ISD::FNMSUB_RND: {
51558     if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
51559         !(SVT == MVT::f32 || SVT == MVT::f64) ||
51560         !isOperationLegal(ISD::FMA, VT))
51561       break;
51562 
51563     // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
51564     // if it may have signed zeros.
51565     if (!Flags.hasNoSignedZeros())
51566       break;
51567 
51568     // This is always negatible for free but we might be able to remove some
51569     // extra operand negations as well.
51570     SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
51571     for (int i = 0; i != 3; ++i)
51572       NewOps[i] = getCheaperNegatedExpression(
51573           Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
51574 
51575     bool NegA = !!NewOps[0];
51576     bool NegB = !!NewOps[1];
51577     bool NegC = !!NewOps[2];
51578     unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
51579 
51580     Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
51581                                   : NegatibleCost::Neutral;
51582 
51583     // Fill in the non-negated ops with the original values.
51584     for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
51585       if (!NewOps[i])
51586         NewOps[i] = Op.getOperand(i);
51587     return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
51588   }
51589   case X86ISD::FRCP:
51590     if (SDValue NegOp0 =
51591             getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
51592                                  ForCodeSize, Cost, Depth + 1))
51593       return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
51594     break;
51595   }
51596 
51597   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
51598                                               ForCodeSize, Cost, Depth);
51599 }
51600 
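/// Replace vector X86ISD::FAND/FOR/FXOR/FANDN nodes with the equivalent
/// integer logic ops when integer vector operations are available (SSE2 and
/// later), bitcasting the operands and the result back to the original FP
/// type.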
51601 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
51602                                  const X86Subtarget &Subtarget) {
51603   MVT VT = N->getSimpleValueType(0);
51604   // If we have integer vector types available, use the integer opcodes.
51605   if (!VT.isVector() || !Subtarget.hasSSE2())
51606     return SDValue();
51607 
51608   SDLoc dl(N);
51609 
51610   unsigned IntBits = VT.getScalarSizeInBits();
51611   MVT IntSVT = MVT::getIntegerVT(IntBits);
51612   MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
51613 
51614   SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
51615   SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
51616   unsigned IntOpcode;
51617   switch (N->getOpcode()) {
51618   default: llvm_unreachable("Unexpected FP logic op");
51619   case X86ISD::FOR:   IntOpcode = ISD::OR; break;
51620   case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
51621   case X86ISD::FAND:  IntOpcode = ISD::AND; break;
51622   case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
51623   }
51624   SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
51625   return DAG.getBitcast(VT, IntOp);
51626 }
51627 
51628 
51629 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
51630 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
51631   if (N->getOpcode() != ISD::XOR)
51632     return SDValue();
51633 
51634   SDValue LHS = N->getOperand(0);
51635   if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
51636     return SDValue();
51637 
51638   X86::CondCode NewCC = X86::GetOppositeBranchCondition(
51639       X86::CondCode(LHS->getConstantOperandVal(0)));
51640   SDLoc DL(N);
51641   return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
51642 }
51643 
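/// Fold (xor (ctlz_zero_undef X), BitWidth-1) and (sub BitWidth-1,
/// (ctlz_zero_undef X)) into X86ISD::BSR, which computes BitWidth-1-ctlz(X)
/// for non-zero X. Skipped on targets with fast LZCNT.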
51644 static SDValue combineXorSubCTLZ(SDNode *N, SelectionDAG &DAG,
51645                                  const X86Subtarget &Subtarget) {
51646   assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::SUB) &&
51647          "Invalid opcode for combing with CTLZ");
51648   if (Subtarget.hasFastLZCNT())
51649     return SDValue();
51650 
51651   EVT VT = N->getValueType(0);
51652   if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32 &&
51653       (VT != MVT::i64 || !Subtarget.is64Bit()))
51654     return SDValue();
51655 
51656   SDValue N0 = N->getOperand(0);
51657   SDValue N1 = N->getOperand(1);
51658 
51659   if (N0.getOpcode() != ISD::CTLZ_ZERO_UNDEF &&
51660       N1.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
51661     return SDValue();
51662 
51663   SDValue OpCTLZ;
51664   SDValue OpSizeTM1;
51665 
51666   if (N1.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
51667     OpCTLZ = N1;
51668     OpSizeTM1 = N0;
51669   } else if (N->getOpcode() == ISD::SUB) {
51670     return SDValue();
51671   } else {
51672     OpCTLZ = N0;
51673     OpSizeTM1 = N1;
51674   }
51675 
51676   if (!OpCTLZ.hasOneUse())
51677     return SDValue();
51678   auto *C = dyn_cast<ConstantSDNode>(OpSizeTM1);
51679   if (!C)
51680     return SDValue();
51681 
51682   if (C->getZExtValue() != uint64_t(OpCTLZ.getValueSizeInBits() - 1))
51683     return SDValue();
51684   SDLoc DL(N);
51685   EVT OpVT = VT;
51686   SDValue Op = OpCTLZ.getOperand(0);
51687   if (VT == MVT::i8) {
51688     // Zero extend to i32 since there is no i8 bsr.
51689     OpVT = MVT::i32;
51690     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, OpVT, Op);
51691   }
51692 
51693   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
51694   Op = DAG.getNode(X86ISD::BSR, DL, VTs, Op);
51695   if (VT == MVT::i8)
51696     Op = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Op);
51697 
51698   return Op;
51699 }
51700 
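/// Do target-specific dag combines on ISD::XOR nodes.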
51701 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
51702                           TargetLowering::DAGCombinerInfo &DCI,
51703                           const X86Subtarget &Subtarget) {
51704   SDValue N0 = N->getOperand(0);
51705   SDValue N1 = N->getOperand(1);
51706   EVT VT = N->getValueType(0);
51707 
51708   // If this is SSE1 only convert to FXOR to avoid scalarization.
51709   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
51710     return DAG.getBitcast(MVT::v4i32,
51711                           DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
51712                                       DAG.getBitcast(MVT::v4f32, N0),
51713                                       DAG.getBitcast(MVT::v4f32, N1)));
51714   }
51715 
51716   if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
51717     return Cmp;
51718 
51719   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
51720     return R;
51721 
51722   if (SDValue R = combineBitOpWithShift(N, DAG))
51723     return R;
51724 
51725   if (SDValue R = combineBitOpWithPACK(N, DAG))
51726     return R;
51727 
51728   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
51729     return FPLogic;
51730 
51731   if (SDValue R = combineXorSubCTLZ(N, DAG, Subtarget))
51732     return R;
51733 
51734   if (DCI.isBeforeLegalizeOps())
51735     return SDValue();
51736 
51737   if (SDValue SetCC = foldXor1SetCC(N, DAG))
51738     return SetCC;
51739 
51740   if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
51741     return R;
51742 
51743   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
51744     return RV;
51745 
51746   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
51747   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51748   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
51749       N0.getOperand(0).getValueType().isVector() &&
51750       N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
51751       TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
51752     return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
51753                                          N0.getOperand(0).getValueType()));
51754   }
51755 
51756   // Handle AVX512 mask widening.
51757   // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
51758   if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
51759       VT.getVectorElementType() == MVT::i1 &&
51760       N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
51761       TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
51762     return DAG.getNode(
51763         ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
51764         DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
51765         N0.getOperand(2));
51766   }
51767 
51768   // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
51769   // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
51770   // TODO: Under what circumstances could this be performed in DAGCombine?
51771   if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
51772       N0.getOperand(0).getOpcode() == N->getOpcode()) {
51773     SDValue TruncExtSrc = N0.getOperand(0);
51774     auto *N1C = dyn_cast<ConstantSDNode>(N1);
51775     auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
51776     if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
51777       SDLoc DL(N);
51778       SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
51779       SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
51780       return DAG.getNode(ISD::XOR, DL, VT, LHS,
51781                          DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
51782     }
51783   }
51784 
51785   if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
51786     return R;
51787 
51788   return combineFneg(N, DAG, DCI, Subtarget);
51789 }
51790 
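/// Simplify the operands of an X86 BEXTR-style node using demanded-bits
/// analysis; constant folding of the node itself is still a TODO.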
51791 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
51792                             TargetLowering::DAGCombinerInfo &DCI,
51793                             const X86Subtarget &Subtarget) {
51794   EVT VT = N->getValueType(0);
51795   unsigned NumBits = VT.getSizeInBits();
51796 
51797   // TODO - Constant Folding.
51798 
51799   // Simplify the inputs.
51800   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51801   APInt DemandedMask(APInt::getAllOnes(NumBits));
51802   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51803     return SDValue(N, 0);
51804 
51805   return SDValue();
51806 }
51807 
51808 static bool isNullFPScalarOrVectorConst(SDValue V) {
51809   return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
51810 }
51811 
51812 /// If a value is a scalar FP zero or a vector FP zero (potentially including
51813 /// undefined elements), return a zero constant that may be used to fold away
51814 /// that value. In the case of a vector, the returned constant will not contain
51815 /// undefined elements even if the input parameter does. This makes it suitable
51816 /// to be used as a replacement operand with operations (eg, bitwise-and) where
51817 /// an undef should not propagate.
51818 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
51819                                         const X86Subtarget &Subtarget) {
51820   if (!isNullFPScalarOrVectorConst(V))
51821     return SDValue();
51822 
51823   if (V.getValueType().isVector())
51824     return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
51825 
51826   return V;
51827 }
51828 
51829 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
51830                                       const X86Subtarget &Subtarget) {
51831   SDValue N0 = N->getOperand(0);
51832   SDValue N1 = N->getOperand(1);
51833   EVT VT = N->getValueType(0);
51834   SDLoc DL(N);
51835 
51836   // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
51837   if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
51838         (VT == MVT::f64 && Subtarget.hasSSE2()) ||
51839         (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
51840     return SDValue();
51841 
51842   auto isAllOnesConstantFP = [](SDValue V) {
51843     if (V.getSimpleValueType().isVector())
51844       return ISD::isBuildVectorAllOnes(V.getNode());
51845     auto *C = dyn_cast<ConstantFPSDNode>(V);
51846     return C && C->getConstantFPValue()->isAllOnesValue();
51847   };
51848 
51849   // fand (fxor X, -1), Y --> fandn X, Y
51850   if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
51851     return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
51852 
51853   // fand X, (fxor Y, -1) --> fandn Y, X
51854   if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
51855     return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
51856 
51857   return SDValue();
51858 }
51859 
51860 /// Do target-specific dag combines on X86ISD::FAND nodes.
51861 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
51862                            const X86Subtarget &Subtarget) {
51863   // FAND(0.0, x) -> 0.0
51864   if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
51865     return V;
51866 
51867   // FAND(x, 0.0) -> 0.0
51868   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51869     return V;
51870 
51871   if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
51872     return V;
51873 
51874   return lowerX86FPLogicOp(N, DAG, Subtarget);
51875 }
51876 
51877 /// Do target-specific dag combines on X86ISD::FANDN nodes.
51878 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
51879                             const X86Subtarget &Subtarget) {
51880   // FANDN(0.0, x) -> x
51881   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51882     return N->getOperand(1);
51883 
51884   // FANDN(x, 0.0) -> 0.0
51885   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51886     return V;
51887 
51888   return lowerX86FPLogicOp(N, DAG, Subtarget);
51889 }
51890 
51891 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
51892 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
51893                           TargetLowering::DAGCombinerInfo &DCI,
51894                           const X86Subtarget &Subtarget) {
51895   assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
51896 
51897   // F[X]OR(0.0, x) -> x
51898   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51899     return N->getOperand(1);
51900 
51901   // F[X]OR(x, 0.0) -> x
51902   if (isNullFPScalarOrVectorConst(N->getOperand(1)))
51903     return N->getOperand(0);
51904 
51905   if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
51906     return NewVal;
51907 
51908   return lowerX86FPLogicOp(N, DAG, Subtarget);
51909 }
51910 
51911 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
51912 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
51913   assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
51914 
51915   // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
51916   if (!DAG.getTarget().Options.NoNaNsFPMath ||
51917       !DAG.getTarget().Options.NoSignedZerosFPMath)
51918     return SDValue();
51919 
51920   // Since no NaNs and no signed zeros are allowed here, convert the FMIN and
51921   // FMAX nodes into FMINC and FMAXC, which are commutative operations.
51922   unsigned NewOp = 0;
51923   switch (N->getOpcode()) {
51924     default: llvm_unreachable("unknown opcode");
51925     case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
51926     case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
51927   }
51928 
51929   return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
51930                      N->getOperand(0), N->getOperand(1));
51931 }
51932 
51933 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
51934                                      const X86Subtarget &Subtarget) {
51935   EVT VT = N->getValueType(0);
51936   if (Subtarget.useSoftFloat() || isSoftF16(VT, Subtarget))
51937     return SDValue();
51938 
51939   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51940 
51941   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
51942         (Subtarget.hasSSE2() && VT == MVT::f64) ||
51943         (Subtarget.hasFP16() && VT == MVT::f16) ||
51944         (VT.isVector() && TLI.isTypeLegal(VT))))
51945     return SDValue();
51946 
51947   SDValue Op0 = N->getOperand(0);
51948   SDValue Op1 = N->getOperand(1);
51949   SDLoc DL(N);
51950   auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
51951 
51952   // If we don't have to respect NaN inputs, this is a direct translation to x86
51953   // min/max instructions.
51954   if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
51955     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
51956 
51957   // If one of the operands is known non-NaN, use the native min/max
51958   // instructions with the non-NaN input as the second operand.
51959   if (DAG.isKnownNeverNaN(Op1))
51960     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
51961   if (DAG.isKnownNeverNaN(Op0))
51962     return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
51963 
51964   // If we have to respect NaN inputs, this takes at least 3 instructions.
51965   // Favor a library call when operating on a scalar and minimizing code size.
51966   if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
51967     return SDValue();
51968 
51969   EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
51970                                          VT);
51971 
51972   // There are 4 possibilities involving NaN inputs, and these are the required
51973   // outputs:
51974   //                   Op1
51975   //               Num     NaN
51976   //            ----------------
51977   //       Num  |  Max  |  Op0 |
51978   // Op0        ----------------
51979   //       NaN  |  Op1  |  NaN |
51980   //            ----------------
51981   //
51982   // The SSE FP max/min instructions were not designed for this case, but rather
51983   // to implement:
51984   //   Min = Op1 < Op0 ? Op1 : Op0
51985   //   Max = Op1 > Op0 ? Op1 : Op0
51986   //
51987   // So they always return Op0 if either input is a NaN. However, we can still
51988   // use those instructions for fmaxnum by selecting away a NaN input.
51989 
51990   // If either operand is NaN, the 2nd source operand (Op0) is passed through.
51991   SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
51992   SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
51993 
51994   // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
51995   // are NaN, the NaN value of Op1 is the result.
51996   return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
51997 }
51998 
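/// Do target-specific dag combines on X86 vector int-to-fp conversion nodes:
/// simplify using demanded vector elements, and narrow a full vector load into
/// a vzload when only the low input elements are actually converted.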
51999 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
52000                                    TargetLowering::DAGCombinerInfo &DCI) {
52001   EVT VT = N->getValueType(0);
52002   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52003 
52004   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
52005   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
52006     return SDValue(N, 0);
52007 
52008   // Convert a full vector load into vzload when not all bits are needed.
52009   SDValue In = N->getOperand(0);
52010   MVT InVT = In.getSimpleValueType();
52011   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52012       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52013     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52014     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
52015     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52016     MVT MemVT = MVT::getIntegerVT(NumBits);
52017     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52018     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52019       SDLoc dl(N);
52020       SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
52021                                     DAG.getBitcast(InVT, VZLoad));
52022       DCI.CombineTo(N, Convert);
52023       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52024       DCI.recursivelyDeleteUnusedNodes(LN);
52025       return SDValue(N, 0);
52026     }
52027   }
52028 
52029   return SDValue();
52030 }
52031 
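/// Do target-specific dag combines on X86 vector fp-to-int conversion nodes
/// (and their strict variants): narrow a full vector load into a vzload when
/// not all of the input elements are needed.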
52032 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
52033                                      TargetLowering::DAGCombinerInfo &DCI) {
52034   bool IsStrict = N->isTargetStrictFPOpcode();
52035   EVT VT = N->getValueType(0);
52036 
52037   // Convert a full vector load into vzload when not all bits are needed.
52038   SDValue In = N->getOperand(IsStrict ? 1 : 0);
52039   MVT InVT = In.getSimpleValueType();
52040   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52041       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52042     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52043     LoadSDNode *LN = cast<LoadSDNode>(In);
52044     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52045     MVT MemVT = MVT::getFloatingPointVT(NumBits);
52046     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52047     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52048       SDLoc dl(N);
52049       if (IsStrict) {
52050         SDValue Convert =
52051             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
52052                         {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
52053         DCI.CombineTo(N, Convert, Convert.getValue(1));
52054       } else {
52055         SDValue Convert =
52056             DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
52057         DCI.CombineTo(N, Convert);
52058       }
52059       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52060       DCI.recursivelyDeleteUnusedNodes(LN);
52061       return SDValue(N, 0);
52062     }
52063   }
52064 
52065   return SDValue();
52066 }
52067 
52068 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
52069 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
52070                             TargetLowering::DAGCombinerInfo &DCI,
52071                             const X86Subtarget &Subtarget) {
52072   SDValue N0 = N->getOperand(0);
52073   SDValue N1 = N->getOperand(1);
52074   MVT VT = N->getSimpleValueType(0);
52075   int NumElts = VT.getVectorNumElements();
52076   unsigned EltSizeInBits = VT.getScalarSizeInBits();
52077   SDLoc DL(N);
52078 
52079   // ANDNP(undef, x) -> 0
52080   // ANDNP(x, undef) -> 0
52081   if (N0.isUndef() || N1.isUndef())
52082     return DAG.getConstant(0, DL, VT);
52083 
52084   // ANDNP(0, x) -> x
52085   if (ISD::isBuildVectorAllZeros(N0.getNode()))
52086     return N1;
52087 
52088   // ANDNP(x, 0) -> 0
52089   if (ISD::isBuildVectorAllZeros(N1.getNode()))
52090     return DAG.getConstant(0, DL, VT);
52091 
52092   // ANDNP(x, -1) -> NOT(x) -> XOR(x, -1)
52093   if (ISD::isBuildVectorAllOnes(N1.getNode()))
52094     return DAG.getNOT(DL, N0, VT);
52095 
52096   // Turn ANDNP back to AND if input is inverted.
52097   if (SDValue Not = IsNOT(N0, DAG))
52098     return DAG.getNode(ISD::AND, DL, VT, DAG.getBitcast(VT, Not), N1);
52099 
52100   // Fold for better commutativity:
52101   // ANDNP(x,NOT(y)) -> AND(NOT(x),NOT(y)) -> NOT(OR(X,Y)).
52102   if (N1->hasOneUse())
52103     if (SDValue Not = IsNOT(N1, DAG))
52104       return DAG.getNOT(
52105           DL, DAG.getNode(ISD::OR, DL, VT, N0, DAG.getBitcast(VT, Not)), VT);
52106 
52107   // Constant Folding
52108   APInt Undefs0, Undefs1;
52109   SmallVector<APInt> EltBits0, EltBits1;
52110   if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
52111     if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
52112       SmallVector<APInt> ResultBits;
52113       for (int I = 0; I != NumElts; ++I)
52114         ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
52115       return getConstVector(ResultBits, VT, DAG, DL);
52116     }
52117 
52118     // Constant fold NOT(N0) to allow us to use AND.
52119     // Ensure this is only performed if we can confirm that the bitcasted source
52120     // has oneuse to prevent an infinite loop with canonicalizeBitSelect.
52121     if (N0->hasOneUse()) {
52122       SDValue BC0 = peekThroughOneUseBitcasts(N0);
52123       if (BC0.getOpcode() != ISD::BITCAST) {
52124         for (APInt &Elt : EltBits0)
52125           Elt = ~Elt;
52126         SDValue Not = getConstVector(EltBits0, VT, DAG, DL);
52127         return DAG.getNode(ISD::AND, DL, VT, Not, N1);
52128       }
52129     }
52130   }
52131 
52132   // Attempt to recursively combine a bitmask ANDNP with shuffles.
52133   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
52134     SDValue Op(N, 0);
52135     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
52136       return Res;
52137 
52138     // If either operand is a constant mask, then only the elements that aren't
52139     // zero are actually demanded by the other operand.
52140     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
52141       APInt UndefElts;
52142       SmallVector<APInt> EltBits;
52143       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
52144       APInt DemandedElts = APInt::getAllOnes(NumElts);
52145       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
52146                                         EltBits)) {
52147         DemandedBits.clearAllBits();
52148         DemandedElts.clearAllBits();
52149         for (int I = 0; I != NumElts; ++I) {
52150           if (UndefElts[I]) {
52151             // We can't assume an undef src element gives an undef dst - the
52152             // other src might be zero.
52153             DemandedBits.setAllBits();
52154             DemandedElts.setBit(I);
52155           } else if ((Invert && !EltBits[I].isAllOnes()) ||
52156                      (!Invert && !EltBits[I].isZero())) {
52157             DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
52158             DemandedElts.setBit(I);
52159           }
52160         }
52161       }
52162       return std::make_pair(DemandedBits, DemandedElts);
52163     };
52164     APInt Bits0, Elts0;
52165     APInt Bits1, Elts1;
52166     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
52167     std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
52168 
52169     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52170     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
52171         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
52172         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
52173         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
52174       if (N->getOpcode() != ISD::DELETED_NODE)
52175         DCI.AddToWorklist(N);
52176       return SDValue(N, 0);
52177     }
52178   }
52179 
52180   return SDValue();
52181 }
52182 
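/// Do target-specific dag combines on X86ISD::BT nodes: since BT only uses
/// the low log2(BitWidth) bits of the bit index, try to simplify that operand
/// with the reduced demanded mask.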
52183 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
52184                          TargetLowering::DAGCombinerInfo &DCI) {
52185   SDValue N1 = N->getOperand(1);
52186 
52187   // BT ignores high bits in the bit index operand.
52188   unsigned BitWidth = N1.getValueSizeInBits();
52189   APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
52190   if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
52191     if (N->getOpcode() != ISD::DELETED_NODE)
52192       DCI.AddToWorklist(N);
52193     return SDValue(N, 0);
52194   }
52195 
52196   return SDValue();
52197 }
52198 
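/// Do target-specific dag combines on X86ISD::CVTPH2PS (and its strict
/// variant): only the low four v8i16 source elements feed a v4f32 result, so
/// simplify the source accordingly and narrow a full vector load into a
/// vzload where possible.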
52199 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
52200                                TargetLowering::DAGCombinerInfo &DCI) {
52201   bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
52202   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
52203 
52204   if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
52205     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52206     APInt DemandedElts = APInt::getLowBitsSet(8, 4);
52207     if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
52208       if (N->getOpcode() != ISD::DELETED_NODE)
52209         DCI.AddToWorklist(N);
52210       return SDValue(N, 0);
52211     }
52212 
52213     // Convert a full vector load into vzload when not all bits are needed.
52214     if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
52215       LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
52216       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
52217         SDLoc dl(N);
52218         if (IsStrict) {
52219           SDValue Convert = DAG.getNode(
52220               N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
52221               {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
52222           DCI.CombineTo(N, Convert, Convert.getValue(1));
52223         } else {
52224           SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
52225                                         DAG.getBitcast(MVT::v8i16, VZLoad));
52226           DCI.CombineTo(N, Convert);
52227         }
52228 
52229         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52230         DCI.recursivelyDeleteUnusedNodes(LN);
52231         return SDValue(N, 0);
52232       }
52233     }
52234   }
52235 
52236   return SDValue();
52237 }
52238 
52239 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
52240 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
52241   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52242 
52243   EVT DstVT = N->getValueType(0);
52244 
52245   SDValue N0 = N->getOperand(0);
52246   SDValue N1 = N->getOperand(1);
52247   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52248 
52249   if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
52250     return SDValue();
52251 
52252   // Look through single use any_extends / truncs.
52253   SDValue IntermediateBitwidthOp;
52254   if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
52255       N0.hasOneUse()) {
52256     IntermediateBitwidthOp = N0;
52257     N0 = N0.getOperand(0);
52258   }
52259 
52260   // See if we have a single use cmov.
52261   if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
52262     return SDValue();
52263 
52264   SDValue CMovOp0 = N0.getOperand(0);
52265   SDValue CMovOp1 = N0.getOperand(1);
52266 
52267   // Make sure both operands are constants.
52268   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52269       !isa<ConstantSDNode>(CMovOp1.getNode()))
52270     return SDValue();
52271 
52272   SDLoc DL(N);
52273 
52274   // If we looked through an any_extend/trunc above, add one to the constants.
52275   if (IntermediateBitwidthOp) {
52276     unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
52277     CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
52278     CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
52279   }
52280 
52281   CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
52282   CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
52283 
52284   EVT CMovVT = DstVT;
52285   // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
52286   if (DstVT == MVT::i16) {
52287     CMovVT = MVT::i32;
52288     CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
52289     CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
52290   }
52291 
52292   SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
52293                              N0.getOperand(2), N0.getOperand(3));
52294 
52295   if (CMovVT != DstVT)
52296     CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
52297 
52298   return CMov;
52299 }
52300 
52301 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
52302                                       const X86Subtarget &Subtarget) {
52303   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52304 
52305   if (SDValue V = combineSextInRegCmov(N, DAG))
52306     return V;
52307 
52308   EVT VT = N->getValueType(0);
52309   SDValue N0 = N->getOperand(0);
52310   SDValue N1 = N->getOperand(1);
52311   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52312   SDLoc dl(N);
52313 
52314   // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the
52315   // both SSE and AVX2 since there is no sign-extended shift right
52316   // operation on a vector with 64-bit elements.
52317   //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
52318   // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
52319   if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
52320                            N0.getOpcode() == ISD::SIGN_EXTEND)) {
52321     SDValue N00 = N0.getOperand(0);
52322 
52323     // EXTLOAD has a better solution on AVX2:
52324     // it may be replaced with an X86ISD::VSEXT node.
52325     if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
52326       if (!ISD::isNormalLoad(N00.getNode()))
52327         return SDValue();
52328 
52329     // Attempt to promote any comparison mask ops before the
52330     // SIGN_EXTEND_INREG gets in the way.
52331     if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
52332       return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
52333 
52334     if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
52335       SDValue Tmp =
52336           DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
52337       return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
52338     }
52339   }
52340   return SDValue();
52341 }
52342 
52343 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
52344 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
52345 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
52346 /// opportunities to combine math ops, use an LEA, or use a complex addressing
52347 /// mode. This can eliminate extend, add, and shift instructions.
52348 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
52349                                    const X86Subtarget &Subtarget) {
52350   if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
52351       Ext->getOpcode() != ISD::ZERO_EXTEND)
52352     return SDValue();
52353 
52354   // TODO: This should be valid for other integer types.
52355   EVT VT = Ext->getValueType(0);
52356   if (VT != MVT::i64)
52357     return SDValue();
52358 
52359   SDValue Add = Ext->getOperand(0);
52360   if (Add.getOpcode() != ISD::ADD)
52361     return SDValue();
52362 
52363   SDValue AddOp0 = Add.getOperand(0);
52364   SDValue AddOp1 = Add.getOperand(1);
52365   bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
52366   bool NSW = Add->getFlags().hasNoSignedWrap();
52367   bool NUW = Add->getFlags().hasNoUnsignedWrap();
52368   NSW = NSW || (Sext && DAG.willNotOverflowAdd(true, AddOp0, AddOp1));
52369   NUW = NUW || (!Sext && DAG.willNotOverflowAdd(false, AddOp0, AddOp1));
52370 
52371   // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
52372   // into the 'zext'
52373   if ((Sext && !NSW) || (!Sext && !NUW))
52374     return SDValue();
52375 
52376   // Having a constant operand to the 'add' ensures that we are not increasing
52377   // the instruction count because the constant is extended for free below.
52378   // A constant operand can also become the displacement field of an LEA.
52379   auto *AddOp1C = dyn_cast<ConstantSDNode>(AddOp1);
52380   if (!AddOp1C)
52381     return SDValue();
52382 
52383   // Don't make the 'add' bigger if there's no hope of combining it with some
52384   // other 'add' or 'shl' instruction.
52385   // TODO: It may be profitable to generate simpler LEA instructions in place
52386   // of single 'add' instructions, but the cost model for selecting an LEA
52387   // currently has a high threshold.
52388   bool HasLEAPotential = false;
52389   for (auto *User : Ext->uses()) {
52390     if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
52391       HasLEAPotential = true;
52392       break;
52393     }
52394   }
52395   if (!HasLEAPotential)
52396     return SDValue();
52397 
52398   // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
52399   int64_t AddC = Sext ? AddOp1C->getSExtValue() : AddOp1C->getZExtValue();
52400   SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
52401   SDValue NewConstant = DAG.getConstant(AddC, SDLoc(Add), VT);
52402 
52403   // The wider add is guaranteed to not wrap because both operands are
52404   // sign-extended (or zero-extended, in the 'zext' case).
52405   SDNodeFlags Flags;
52406   Flags.setNoSignedWrap(NSW);
52407   Flags.setNoUnsignedWrap(NUW);
52408   return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
52409 }
52410 
52411 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
52412 // operands and the result of CMOV is not used anywhere else - promote CMOV
52413 // itself instead of promoting its result. This could be beneficial, because:
52414 //     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
52415 //        (or more) pseudo-CMOVs only when they go one-after-another and
52416 //        getting rid of result extension code after CMOV will help that.
52417 //     2) Promotion of constant CMOV arguments is free, hence the
52418 //        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
52419 //     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3-byte, so this
52420 //        promotion is also good in terms of code-size.
52421 //        (64-bit CMOV is 4-bytes, that's why we don't do 32-bit => 64-bit
52422 //         promotion).
52423 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
52424   SDValue CMovN = Extend->getOperand(0);
52425   if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
52426     return SDValue();
52427 
52428   EVT TargetVT = Extend->getValueType(0);
52429   unsigned ExtendOpcode = Extend->getOpcode();
52430   SDLoc DL(Extend);
52431 
52432   EVT VT = CMovN.getValueType();
52433   SDValue CMovOp0 = CMovN.getOperand(0);
52434   SDValue CMovOp1 = CMovN.getOperand(1);
52435 
52436   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52437       !isa<ConstantSDNode>(CMovOp1.getNode()))
52438     return SDValue();
52439 
52440   // Only extend to i32 or i64.
52441   if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
52442     return SDValue();
52443 
52444   // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
52445   // are free.
52446   if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
52447     return SDValue();
52448 
52449   // If this is a zero extend to i64, we should only extend to i32 and use a free
52450   // zero extend to finish.
52451   EVT ExtendVT = TargetVT;
52452   if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
52453     ExtendVT = MVT::i32;
52454 
52455   CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
52456   CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
52457 
52458   SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
52459                             CMovN.getOperand(2), CMovN.getOperand(3));
52460 
52461   // Finish extending if needed.
52462   if (ExtendVT != TargetVT)
52463     Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
52464 
52465   return Res;
52466 }
52467 
52468 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
52469 // result type.
52470 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
52471                                const X86Subtarget &Subtarget) {
52472   SDValue N0 = N->getOperand(0);
52473   EVT VT = N->getValueType(0);
52474   SDLoc dl(N);
52475 
52476   // Only do this combine with AVX512 for vector extends.
52477   if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
52478     return SDValue();
52479 
52480   // Only combine legal element types.
52481   EVT SVT = VT.getVectorElementType();
52482   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
52483       SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
52484     return SDValue();
52485 
52486   // We don't have CMPP Instruction for vxf16
52487   if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
52488     return SDValue();
52489   // We can only do this if the vector size in 256 bits or less.
52490   unsigned Size = VT.getSizeInBits();
52491   if (Size > 256 && Subtarget.useAVX512Regs())
52492     return SDValue();
52493 
52494   // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
52495   // that's the only integer compares with we have.
52496   ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
52497   if (ISD::isUnsignedIntSetCC(CC))
52498     return SDValue();
52499 
52500   // Only do this combine if the extension will be fully consumed by the setcc.
52501   EVT N00VT = N0.getOperand(0).getValueType();
52502   EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
52503   if (Size != MatchingVecType.getSizeInBits())
52504     return SDValue();
52505 
52506   SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
52507 
52508   if (N->getOpcode() == ISD::ZERO_EXTEND)
52509     Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
52510 
52511   return Res;
52512 }
52513 
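/// Do target-specific dag combines on ISD::SIGN_EXTEND nodes.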
52514 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
52515                            TargetLowering::DAGCombinerInfo &DCI,
52516                            const X86Subtarget &Subtarget) {
52517   SDValue N0 = N->getOperand(0);
52518   EVT VT = N->getValueType(0);
52519   SDLoc DL(N);
52520 
52521   // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52522   if (!DCI.isBeforeLegalizeOps() &&
52523       N0.getOpcode() == X86ISD::SETCC_CARRY) {
52524     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
52525                                  N0->getOperand(1));
52526     bool ReplaceOtherUses = !N0.hasOneUse();
52527     DCI.CombineTo(N, Setcc);
52528     // Replace other uses with a truncate of the widened setcc_carry.
52529     if (ReplaceOtherUses) {
52530       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52531                                   N0.getValueType(), Setcc);
52532       DCI.CombineTo(N0.getNode(), Trunc);
52533     }
52534 
52535     return SDValue(N, 0);
52536   }
52537 
52538   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52539     return NewCMov;
52540 
52541   if (!DCI.isBeforeLegalizeOps())
52542     return SDValue();
52543 
52544   if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52545     return V;
52546 
52547   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
52548                                                  DAG, DCI, Subtarget))
52549     return V;
52550 
52551   if (VT.isVector()) {
52552     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52553       return R;
52554 
52555     if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
52556       return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
52557   }
52558 
52559   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52560     return NewAdd;
52561 
52562   return SDValue();
52563 }
52564 
52565 // Inverting a constant vector is profitable if it can be eliminated and the
52566 // inverted vector is already present in the DAG. Otherwise, it will be loaded
52567 // anyway.
52568 //
52569 // We determine which of the values can be completely eliminated and invert it.
52570 // If both are eliminable, select a vector with the first negative element.
52571 static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
52572   assert(ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()) &&
52573          "ConstantFP build vector expected");
52574   // Check if we can eliminate V. We assume if a value is only used in FMAs, we
52575   // can eliminate it. Since this function is invoked for each FMA with this
52576   // vector.
52577   auto IsNotFMA = [](SDNode *Use) {
52578     return Use->getOpcode() != ISD::FMA && Use->getOpcode() != ISD::STRICT_FMA;
52579   };
52580   if (llvm::any_of(V->uses(), IsNotFMA))
52581     return SDValue();
52582 
52583   SmallVector<SDValue, 8> Ops;
52584   EVT VT = V.getValueType();
52585   EVT EltVT = VT.getVectorElementType();
52586   for (auto Op : V->op_values()) {
52587     if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
52588       Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
52589     } else {
52590       assert(Op.isUndef());
52591       Ops.push_back(DAG.getUNDEF(EltVT));
52592     }
52593   }
52594 
52595   SDNode *NV = DAG.getNodeIfExists(ISD::BUILD_VECTOR, DAG.getVTList(VT), Ops);
52596   if (!NV)
52597     return SDValue();
52598 
52599   // If an inverted version cannot be eliminated, choose it instead of the
52600   // original version.
52601   if (llvm::any_of(NV->uses(), IsNotFMA))
52602     return SDValue(NV, 0);
52603 
52604   // If the inverted version also can be eliminated, we have to consistently
52605   // prefer one of the values. We prefer a constant with a negative value on
52606   // the first place.
52607   // N.B. We need to skip undefs that may precede a value.
52608   for (auto op : V->op_values()) {
52609     if (auto *Cst = dyn_cast<ConstantFPSDNode>(op)) {
52610       if (Cst->isNegative())
52611         return SDValue();
52612       break;
52613     }
52614   }
52615   return SDValue(NV, 0);
52616 }
52617 
52618 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
52619                           TargetLowering::DAGCombinerInfo &DCI,
52620                           const X86Subtarget &Subtarget) {
52621   SDLoc dl(N);
52622   EVT VT = N->getValueType(0);
52623   bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
52624 
52625   // Let legalize expand this if it isn't a legal type yet.
52626   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52627   if (!TLI.isTypeLegal(VT))
52628     return SDValue();
52629 
52630   SDValue A = N->getOperand(IsStrict ? 1 : 0);
52631   SDValue B = N->getOperand(IsStrict ? 2 : 1);
52632   SDValue C = N->getOperand(IsStrict ? 3 : 2);
52633 
52634   // If the operation allows fast-math and the target does not support FMA,
52635   // split this into mul+add to avoid libcall(s).
52636   SDNodeFlags Flags = N->getFlags();
52637   if (!IsStrict && Flags.hasAllowReassociation() &&
52638       TLI.isOperationExpand(ISD::FMA, VT)) {
52639     SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
52640     return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
52641   }
52642 
52643   EVT ScalarVT = VT.getScalarType();
52644   if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
52645        !Subtarget.hasAnyFMA()) &&
52646       !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
52647     return SDValue();
52648 
52649   auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
52650     bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52651     bool LegalOperations = !DCI.isBeforeLegalizeOps();
52652     if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
52653                                                        CodeSize)) {
52654       V = NegV;
52655       return true;
52656     }
52657     // Look through extract_vector_elts. If it comes from an FNEG, create a
52658     // new extract from the FNEG input.
52659     if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
52660         isNullConstant(V.getOperand(1))) {
52661       SDValue Vec = V.getOperand(0);
52662       if (SDValue NegV = TLI.getCheaperNegatedExpression(
52663               Vec, DAG, LegalOperations, CodeSize)) {
52664         V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
52665                         NegV, V.getOperand(1));
52666         return true;
52667       }
52668     }
52669     // Look up an inverted version of constant vector V in the DAG.
52670     if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode())) {
52671       if (SDValue NegV = getInvertedVectorForFMA(V, DAG)) {
52672         V = NegV;
52673         return true;
52674       }
52675     }
52676     return false;
52677   };
52678 
52679   // Do not convert the passthru input of scalar intrinsics.
52680   // FIXME: We could allow negations of the lower element only.
52681   bool NegA = invertIfNegative(A);
52682   bool NegB = invertIfNegative(B);
52683   bool NegC = invertIfNegative(C);
52684 
52685   if (!NegA && !NegB && !NegC)
52686     return SDValue();
52687 
52688   unsigned NewOpcode =
52689       negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
52690 
52691   // Propagate fast-math-flags to new FMA node.
52692   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
52693   if (IsStrict) {
52694     assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
52695     return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
52696                        {N->getOperand(0), A, B, C});
52697   } else {
52698     if (N->getNumOperands() == 4)
52699       return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
52700     return DAG.getNode(NewOpcode, dl, VT, A, B, C);
52701   }
52702 }
52703 
52704 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
52705 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
52706 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
52707                                TargetLowering::DAGCombinerInfo &DCI) {
52708   SDLoc dl(N);
52709   EVT VT = N->getValueType(0);
52710   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52711   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52712   bool LegalOperations = !DCI.isBeforeLegalizeOps();
52713 
52714   SDValue N2 = N->getOperand(2);
52715 
52716   SDValue NegN2 =
52717       TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
52718   if (!NegN2)
52719     return SDValue();
52720   unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
52721 
52722   if (N->getNumOperands() == 4)
52723     return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52724                        NegN2, N->getOperand(3));
52725   return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52726                      NegN2);
52727 }
52728 
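/// Do target-specific dag combines on zero-extend and any-extend nodes.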
52729 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
52730                            TargetLowering::DAGCombinerInfo &DCI,
52731                            const X86Subtarget &Subtarget) {
52732   SDLoc dl(N);
52733   SDValue N0 = N->getOperand(0);
52734   EVT VT = N->getValueType(0);
52735 
52736   // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52737   // FIXME: Is this needed? We don't seem to have any tests for it.
52738   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
52739       N0.getOpcode() == X86ISD::SETCC_CARRY) {
52740     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
52741                                  N0->getOperand(1));
52742     bool ReplaceOtherUses = !N0.hasOneUse();
52743     DCI.CombineTo(N, Setcc);
52744     // Replace other uses with a truncate of the widened setcc_carry.
52745     if (ReplaceOtherUses) {
52746       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52747                                   N0.getValueType(), Setcc);
52748       DCI.CombineTo(N0.getNode(), Trunc);
52749     }
52750 
52751     return SDValue(N, 0);
52752   }
52753 
52754   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52755     return NewCMov;
52756 
52757   if (DCI.isBeforeLegalizeOps())
52758     if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52759       return V;
52760 
52761   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
52762                                                  DAG, DCI, Subtarget))
52763     return V;
52764 
52765   if (VT.isVector())
52766     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52767       return R;
52768 
52769   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52770     return NewAdd;
52771 
52772   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
52773     return R;
52774 
52775   // TODO: Combine with any target/faux shuffle.
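  // extend(packus(x,y)) -> concat(x,y) when the pack is lossless, i.e. the
  // upper half of every source element is known zero (checked below with
  // MaskedValueIsZero), so re-extending just recovers the original elements.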
52776   if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
52777       VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
52778     SDValue N00 = N0.getOperand(0);
52779     SDValue N01 = N0.getOperand(1);
52780     unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
52781     APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
52782     if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
52783         (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
52784       return concatSubVectors(N00, N01, DAG, dl);
52785     }
52786   }
52787 
52788   return SDValue();
52789 }
52790 
52791 /// If we have AVX512 but not BWI, and this is a vXi16/vXi8 setcc, just
52792 /// pre-promote its result type since vXi1 vectors don't get promoted
52793 /// during type legalization.
52794 static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
52795                                         SDValue RHS, ISD::CondCode CC,
52796                                         const SDLoc &DL, SelectionDAG &DAG,
52797                                         const X86Subtarget &Subtarget) {
52798   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
52799       VT.getVectorElementType() == MVT::i1 &&
52800       (OpVT.getVectorElementType() == MVT::i8 ||
52801        OpVT.getVectorElementType() == MVT::i16)) {
52802     SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
52803     return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
52804   }
52805   return SDValue();
52806 }
52807 
52808 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
52809                             TargetLowering::DAGCombinerInfo &DCI,
52810                             const X86Subtarget &Subtarget) {
52811   const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
52812   const SDValue LHS = N->getOperand(0);
52813   const SDValue RHS = N->getOperand(1);
52814   EVT VT = N->getValueType(0);
52815   EVT OpVT = LHS.getValueType();
52816   SDLoc DL(N);
52817 
52818   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
52819     if (SDValue V = combineVectorSizedSetCCEquality(VT, LHS, RHS, CC, DL, DAG,
52820                                                     Subtarget))
52821       return V;
52822 
52823     if (VT == MVT::i1) {
52824       X86::CondCode X86CC;
52825       if (SDValue V =
52826               MatchVectorAllEqualTest(LHS, RHS, CC, DL, Subtarget, DAG, X86CC))
52827         return DAG.getNode(ISD::TRUNCATE, DL, VT, getSETCC(X86CC, V, DL, DAG));
52828     }
52829 
52830     if (OpVT.isScalarInteger()) {
52831       // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
52832       // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
52833       auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
52834         if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
52835           if (N0.getOperand(0) == N1)
52836             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52837                                N0.getOperand(1));
52838           if (N0.getOperand(1) == N1)
52839             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52840                                N0.getOperand(0));
52841         }
52842         return SDValue();
52843       };
52844       if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
52845         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52846       if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
52847         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52848 
52849       // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
52850       // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
52851       auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
52852         if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
52853           if (N0.getOperand(0) == N1)
52854             return DAG.getNode(ISD::AND, DL, OpVT, N1,
52855                                DAG.getNOT(DL, N0.getOperand(1), OpVT));
52856           if (N0.getOperand(1) == N1)
52857             return DAG.getNode(ISD::AND, DL, OpVT, N1,
52858                                DAG.getNOT(DL, N0.getOperand(0), OpVT));
52859         }
52860         return SDValue();
52861       };
52862       if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
52863         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52864       if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
52865         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52866 
52867       // cmpeq(trunc(x),C) --> cmpeq(x,C)
52868       // cmpne(trunc(x),C) --> cmpne(x,C)
52869       // iff x upper bits are zero.
52870       if (LHS.getOpcode() == ISD::TRUNCATE &&
52871           LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
52872           isa<ConstantSDNode>(RHS) && !DCI.isBeforeLegalize()) {
52873         EVT SrcVT = LHS.getOperand(0).getValueType();
52874         APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
52875                                                 OpVT.getScalarSizeInBits());
52876         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52877         auto *C = cast<ConstantSDNode>(RHS);
52878         if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
52879             TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
52880           return DAG.getSetCC(DL, VT, LHS.getOperand(0),
52881                               DAG.getConstant(C->getAPIntValue().zextOrTrunc(
52882                                                   SrcVT.getScalarSizeInBits()),
52883                                               DL, SrcVT),
52884                               CC);
52885       }
52886 
52887       // With C as a power of 2 and C != 0 and C != INT_MIN:
52888       //    icmp eq Abs(A), C ->
52889       //        (icmp eq A, C) | (icmp eq A, -C)
52890       //    icmp ne Abs(A), C ->
52891       //        (icmp ne A, C) & (icmp ne A, -C)
52892       // Both of these patterns can be better optimized in
52893       // DAGCombiner::foldAndOrOfSETCC. Note this only applies for scalar
52894       // integers, which is checked above.
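      // For example, with C == 4: (icmp eq (abs A), 4) becomes
      // (icmp eq A, 4) | (icmp eq A, -4), since abs(A) == 4 iff A is 4 or -4.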
52895       if (LHS.getOpcode() == ISD::ABS && LHS.hasOneUse()) {
52896         if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
52897           const APInt &CInt = C->getAPIntValue();
52898           // We can better optimize this case in DAGCombiner::foldAndOrOfSETCC.
52899           if (CInt.isPowerOf2() && !CInt.isMinSignedValue()) {
52900             SDValue BaseOp = LHS.getOperand(0);
52901             SDValue SETCC0 = DAG.getSetCC(DL, VT, BaseOp, RHS, CC);
52902             SDValue SETCC1 = DAG.getSetCC(
52903                 DL, VT, BaseOp, DAG.getConstant(-CInt, DL, OpVT), CC);
52904             return DAG.getNode(CC == ISD::SETEQ ? ISD::OR : ISD::AND, DL, VT,
52905                                SETCC0, SETCC1);
52906           }
52907         }
52908       }
52909     }
52910   }
52911 
52912   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
52913       (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
52914     // Using temporaries to avoid messing up operand ordering for later
52915     // transformations if this doesn't work.
52916     SDValue Op0 = LHS;
52917     SDValue Op1 = RHS;
52918     ISD::CondCode TmpCC = CC;
52919     // Put build_vector on the right.
52920     if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
52921       std::swap(Op0, Op1);
52922       TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
52923     }
52924 
52925     bool IsSEXT0 =
52926         (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
52927         (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
52928     bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
52929 
52930     if (IsSEXT0 && IsVZero1) {
52931       assert(VT == Op0.getOperand(0).getValueType() &&
52932              "Unexpected operand type");
52933       if (TmpCC == ISD::SETGT)
52934         return DAG.getConstant(0, DL, VT);
52935       if (TmpCC == ISD::SETLE)
52936         return DAG.getConstant(1, DL, VT);
52937       if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
52938         return DAG.getNOT(DL, Op0.getOperand(0), VT);
52939 
52940       assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
52941              "Unexpected condition code!");
52942       return Op0.getOperand(0);
52943     }
52944   }
52945 
52946   // Try to make an unsigned vector comparison signed. On pre-AVX512 targets
52947   // only signed comparisons (`PCMPGT`) are available, and on AVX512 it is
52948   // often better to use `PCMPGT` if the result is meant to stay in a vector
52949   // (and if it is going to a mask, there are signed AVX512 comparisons).
52950   if (VT.isVector() && OpVT.isVector() && OpVT.isInteger()) {
52951     bool CanMakeSigned = false;
52952     if (ISD::isUnsignedIntSetCC(CC)) {
52953       KnownBits CmpKnown =
52954           DAG.computeKnownBits(LHS).intersectWith(DAG.computeKnownBits(RHS));
52955       // If we know LHS/RHS share the same sign bit at each element we can
52956       // make this signed.
52957       // NOTE: `computeKnownBits` on a vector type aggregates common bits
52958       // across all lanes. So a pattern where the sign varies from lane to
52959       // lane, but at each lane Sign(LHS) is known to equal Sign(RHS), will be
52960       // missed. We could get around this by demanding each lane
52961       // independently, but this isn't the most important optimization and
52962       // that may eat into compile time.
52963       CanMakeSigned =
52964           CmpKnown.Zero.isSignBitSet() || CmpKnown.One.isSignBitSet();
52965     }
52966     if (CanMakeSigned || ISD::isSignedIntSetCC(CC)) {
52967       SDValue LHSOut = LHS;
52968       SDValue RHSOut = RHS;
52969       ISD::CondCode NewCC = CC;
52970       switch (CC) {
52971       case ISD::SETGE:
52972       case ISD::SETUGE:
52973         if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ true,
52974                                                   /*NSW*/ true))
52975           LHSOut = NewLHS;
52976         else if (SDValue NewRHS = incDecVectorConstant(
52977                      RHS, DAG, /*IsInc*/ false, /*NSW*/ true))
52978           RHSOut = NewRHS;
52979         else
52980           break;
52981 
52982         [[fallthrough]];
52983       case ISD::SETUGT:
52984         NewCC = ISD::SETGT;
52985         break;
52986 
52987       case ISD::SETLE:
52988       case ISD::SETULE:
52989         if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ false,
52990                                                   /*NSW*/ true))
52991           LHSOut = NewLHS;
52992         else if (SDValue NewRHS = incDecVectorConstant(RHS, DAG, /*IsInc*/ true,
52993                                                        /*NSW*/ true))
52994           RHSOut = NewRHS;
52995         else
52996           break;
52997 
52998         [[fallthrough]];
52999       case ISD::SETULT:
53000         // Will be swapped to SETGT in LowerVSETCC*.
53001         NewCC = ISD::SETLT;
53002         break;
53003       default:
53004         break;
53005       }
53006       if (NewCC != CC) {
53007         if (SDValue R = truncateAVX512SetCCNoBWI(VT, OpVT, LHSOut, RHSOut,
53008                                                  NewCC, DL, DAG, Subtarget))
53009           return R;
53010         return DAG.getSetCC(DL, VT, LHSOut, RHSOut, NewCC);
53011       }
53012     }
53013   }
53014 
53015   if (SDValue R =
53016           truncateAVX512SetCCNoBWI(VT, OpVT, LHS, RHS, CC, DL, DAG, Subtarget))
53017     return R;
53018 
53019   // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
53020   // to avoid scalarization via legalization because v4i32 is not a legal type.
53021   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
53022       LHS.getValueType() == MVT::v4f32)
53023     return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
53024 
53025   // X pred 0.0 --> X pred -X
53026   // If the negation of X already exists, use it in the comparison. This removes
53027   // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
53028   // instructions in patterns with a 'select' node.
53029   if (isNullFPScalarOrVectorConst(RHS)) {
53030     SDVTList FNegVT = DAG.getVTList(OpVT);
53031     if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
53032       return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
53033   }
53034 
53035   return SDValue();
53036 }
53037 
53038 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
53039                              TargetLowering::DAGCombinerInfo &DCI,
53040                              const X86Subtarget &Subtarget) {
53041   SDValue Src = N->getOperand(0);
53042   MVT SrcVT = Src.getSimpleValueType();
53043   MVT VT = N->getSimpleValueType(0);
53044   unsigned NumBits = VT.getScalarSizeInBits();
53045   unsigned NumElts = SrcVT.getVectorNumElements();
53046   unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
53047   assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
53048 
53049   // Perform constant folding.
53050   APInt UndefElts;
53051   SmallVector<APInt, 32> EltBits;
53052   if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
53053     APInt Imm(32, 0);
53054     for (unsigned Idx = 0; Idx != NumElts; ++Idx)
53055       if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53056         Imm.setBit(Idx);
53057 
53058     return DAG.getConstant(Imm, SDLoc(N), VT);
53059   }
53060 
53061   // Look through int->fp bitcasts that don't change the element width.
53062   unsigned EltWidth = SrcVT.getScalarSizeInBits();
53063   if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
53064       Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
53065     return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
53066 
53067   // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
53068   // with scalar comparisons.
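  // For example, for a v4i32 source the mask occupies bits [3:0], so
  // movmsk(not(x)) == movmsk(x) ^ 0b1111; the XOR constant below is exactly
  // that low-NumElts bit mask.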
53069   if (SDValue NotSrc = IsNOT(Src, DAG)) {
53070     SDLoc DL(N);
53071     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53072     NotSrc = DAG.getBitcast(SrcVT, NotSrc);
53073     return DAG.getNode(ISD::XOR, DL, VT,
53074                        DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
53075                        DAG.getConstant(NotMask, DL, VT));
53076   }
53077 
53078   // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
53079   // results with scalar comparisons.
53080   if (Src.getOpcode() == X86ISD::PCMPGT &&
53081       ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
53082     SDLoc DL(N);
53083     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53084     return DAG.getNode(ISD::XOR, DL, VT,
53085                        DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
53086                        DAG.getConstant(NotMask, DL, VT));
53087   }
53088 
53089   // Fold movmsk(icmp_eq(and(x,c1),c1)) -> movmsk(shl(x,c2))
53090   // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
53091   // iff pow2splat(c1).
53092   // Use KnownBits to determine if only a single bit is non-zero
53093   // in each element (pow2 or zero), and shift that bit to the msb.
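  // For example, with v4i32 x and c1 == <8,8,8,8> (bit 3 splat), c2 == 28:
  // movmsk(pcmpeq(and(x,8),8)) == movmsk(shl(x,28)), i.e. result bit i is
  // simply bit 3 of lane i.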
53094   if (Src.getOpcode() == X86ISD::PCMPEQ) {
53095     KnownBits KnownLHS = DAG.computeKnownBits(Src.getOperand(0));
53096     KnownBits KnownRHS = DAG.computeKnownBits(Src.getOperand(1));
53097     unsigned ShiftAmt = KnownLHS.countMinLeadingZeros();
53098     if (KnownLHS.countMaxPopulation() == 1 &&
53099         (KnownRHS.isZero() || (KnownRHS.countMaxPopulation() == 1 &&
53100                                ShiftAmt == KnownRHS.countMinLeadingZeros()))) {
53101       SDLoc DL(N);
53102       MVT ShiftVT = SrcVT;
53103       SDValue ShiftLHS = Src.getOperand(0);
53104       SDValue ShiftRHS = Src.getOperand(1);
53105       if (ShiftVT.getScalarType() == MVT::i8) {
53106         // vXi8 shifts - we only care about the signbit so can use PSLLW.
53107         ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
53108         ShiftLHS = DAG.getBitcast(ShiftVT, ShiftLHS);
53109         ShiftRHS = DAG.getBitcast(ShiftVT, ShiftRHS);
53110       }
53111       ShiftLHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53112                                             ShiftLHS, ShiftAmt, DAG);
53113       ShiftRHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53114                                             ShiftRHS, ShiftAmt, DAG);
53115       ShiftLHS = DAG.getBitcast(SrcVT, ShiftLHS);
53116       ShiftRHS = DAG.getBitcast(SrcVT, ShiftRHS);
53117       SDValue Res = DAG.getNode(ISD::XOR, DL, SrcVT, ShiftLHS, ShiftRHS);
53118       return DAG.getNode(X86ISD::MOVMSK, DL, VT, DAG.getNOT(DL, Res, SrcVT));
53119     }
53120   }
53121 
53122   // Fold movmsk(logic(X,C)) -> logic(movmsk(X),C)
53123   if (N->isOnlyUserOf(Src.getNode())) {
53124     SDValue SrcBC = peekThroughOneUseBitcasts(Src);
53125     if (ISD::isBitwiseLogicOp(SrcBC.getOpcode())) {
53126       APInt UndefElts;
53127       SmallVector<APInt, 32> EltBits;
53128       if (getTargetConstantBitsFromNode(SrcBC.getOperand(1), NumBitsPerElt,
53129                                         UndefElts, EltBits)) {
53130         APInt Mask = APInt::getZero(NumBits);
53131         for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
53132           if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53133             Mask.setBit(Idx);
53134         }
53135         SDLoc DL(N);
53136         SDValue NewSrc = DAG.getBitcast(SrcVT, SrcBC.getOperand(0));
53137         SDValue NewMovMsk = DAG.getNode(X86ISD::MOVMSK, DL, VT, NewSrc);
53138         return DAG.getNode(SrcBC.getOpcode(), DL, VT, NewMovMsk,
53139                            DAG.getConstant(Mask, DL, VT));
53140       }
53141     }
53142   }
53143 
53144   // Simplify the inputs.
53145   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53146   APInt DemandedMask(APInt::getAllOnes(NumBits));
53147   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53148     return SDValue(N, 0);
53149 
53150   return SDValue();
53151 }
53152 
53153 static SDValue combineTESTP(SDNode *N, SelectionDAG &DAG,
53154                             TargetLowering::DAGCombinerInfo &DCI,
53155                             const X86Subtarget &Subtarget) {
53156   MVT VT = N->getSimpleValueType(0);
53157   unsigned NumBits = VT.getScalarSizeInBits();
53158 
53159   // Simplify the inputs.
53160   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53161   APInt DemandedMask(APInt::getAllOnes(NumBits));
53162   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53163     return SDValue(N, 0);
53164 
53165   return SDValue();
53166 }
53167 
53168 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
53169                                        TargetLowering::DAGCombinerInfo &DCI) {
53170   auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
53171   SDValue Mask = MemOp->getMask();
53172 
53173   // With vector masks we only demand the sign bit of each mask element.
53174   if (Mask.getScalarValueSizeInBits() != 1) {
53175     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53176     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53177     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53178       if (N->getOpcode() != ISD::DELETED_NODE)
53179         DCI.AddToWorklist(N);
53180       return SDValue(N, 0);
53181     }
53182   }
53183 
53184   return SDValue();
53185 }
53186 
53187 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
53188                                     SDValue Index, SDValue Base, SDValue Scale,
53189                                     SelectionDAG &DAG) {
53190   SDLoc DL(GorS);
53191 
53192   if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
53193     SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
53194                       Gather->getMask(), Base, Index, Scale } ;
53195     return DAG.getMaskedGather(Gather->getVTList(),
53196                                Gather->getMemoryVT(), DL, Ops,
53197                                Gather->getMemOperand(),
53198                                Gather->getIndexType(),
53199                                Gather->getExtensionType());
53200   }
53201   auto *Scatter = cast<MaskedScatterSDNode>(GorS);
53202   SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
53203                     Scatter->getMask(), Base, Index, Scale };
53204   return DAG.getMaskedScatter(Scatter->getVTList(),
53205                               Scatter->getMemoryVT(), DL,
53206                               Ops, Scatter->getMemOperand(),
53207                               Scatter->getIndexType(),
53208                               Scatter->isTruncatingStore());
53209 }
53210 
53211 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
53212                                     TargetLowering::DAGCombinerInfo &DCI) {
53213   SDLoc DL(N);
53214   auto *GorS = cast<MaskedGatherScatterSDNode>(N);
53215   SDValue Index = GorS->getIndex();
53216   SDValue Base = GorS->getBasePtr();
53217   SDValue Scale = GorS->getScale();
53218 
53219   if (DCI.isBeforeLegalize()) {
53220     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53221 
53222     // Shrink constant indices if they are larger than 32-bits.
53223     // Shrink constant indices if they are wider than 32 bits.
53224     // FIXME: We could check that the type is legal if we're after legalize
53225     // types, but then we would need to construct test cases where that happens.
53226     // FIXME: We could support more than just constant vectors, but we need to be
53227     // careful with costing. A truncate that can be optimized out would be fine.
53228     // Otherwise we might only want to create a truncate if it avoids a split.
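    // For example, a constant v2i64 index <0, 40> fits in 32 bits and can be
    // truncated to v2i32, which may later allow a narrower gather/scatter
    // index.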
53229     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
53230       if (BV->isConstant() && IndexWidth > 32 &&
53231           DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53232         EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53233         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53234         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53235       }
53236     }
53237 
53238     // Shrink any sign/zero extends from a type of 32 bits or smaller to a
53239     // type wider than 32 bits if there are sufficient sign bits. Only do this
53240     // before legalize types to avoid creating illegal types in truncate.
53241     if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
53242          Index.getOpcode() == ISD::ZERO_EXTEND) &&
53243         IndexWidth > 32 &&
53244         Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
53245         DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53246       EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53247       Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53248       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53249     }
53250   }
53251 
53252   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53253   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53254   // Try to move splat constant adders from the index operand to the base
53255   // pointer operand, taking care to multiply by the scale. We can only do
53256   // this when the index element type is the same as the pointer type;
53257   // otherwise we would need to be sure the math doesn't wrap before the scale.
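  // For example, with Scale == 4 and Index == add(I, splat(3)), the splat can
  // be folded away as Base' = Base + 12 and Index' = I, since
  // Base + 4*(I + 3) == (Base + 12) + 4*I.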
53258   if (Index.getOpcode() == ISD::ADD &&
53259       Index.getValueType().getVectorElementType() == PtrVT &&
53260       isa<ConstantSDNode>(Scale)) {
53261     uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
53262     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
53263       BitVector UndefElts;
53264       if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
53265         // FIXME: Allow non-constant?
53266         if (UndefElts.none()) {
53267           // Apply the scale.
53268           APInt Adder = C->getAPIntValue() * ScaleAmt;
53269           // Add it to the existing base.
53270           Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
53271                              DAG.getConstant(Adder, DL, PtrVT));
53272           Index = Index.getOperand(0);
53273           return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53274         }
53275       }
53276 
53277       // It's also possible that the base is just a constant. In that case,
53278       // just replace it with 0 and move the displacement into the index.
53279       if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
53280           isOneConstant(Scale)) {
53281         SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
53282         // Combine the constant build_vector and the constant base.
53283         Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53284                             Index.getOperand(1), Splat);
53285         // Add to the LHS of the original Index add.
53286         Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53287                             Index.getOperand(0), Splat);
53288         Base = DAG.getConstant(0, DL, Base.getValueType());
53289         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53290       }
53291     }
53292   }
53293 
53294   if (DCI.isBeforeLegalizeOps()) {
53295     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53296 
53297     // Make sure the index is either i32 or i64
53298     if (IndexWidth != 32 && IndexWidth != 64) {
53299       MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
53300       EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
53301       Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
53302       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53303     }
53304   }
53305 
53306   // With vector masks we only demand the sign bit of each mask element.
53307   SDValue Mask = GorS->getMask();
53308   if (Mask.getScalarValueSizeInBits() != 1) {
53309     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53310     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53311     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53312       if (N->getOpcode() != ISD::DELETED_NODE)
53313         DCI.AddToWorklist(N);
53314       return SDValue(N, 0);
53315     }
53316   }
53317 
53318   return SDValue();
53319 }
53320 
53321 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAGS_INPUT
53322 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
53323                                const X86Subtarget &Subtarget) {
53324   SDLoc DL(N);
53325   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
53326   SDValue EFLAGS = N->getOperand(1);
53327 
53328   // Try to simplify the EFLAGS and condition code operands.
53329   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
53330     return getSETCC(CC, Flags, DL, DAG);
53331 
53332   return SDValue();
53333 }
53334 
53335 /// Optimize branch condition evaluation.
53336 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
53337                              const X86Subtarget &Subtarget) {
53338   SDLoc DL(N);
53339   SDValue EFLAGS = N->getOperand(3);
53340   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
53341 
53342   // Try to simplify the EFLAGS and condition code operands.
53343   // Make sure to not keep references to operands, as combineSetCCEFLAGS can
53344   // RAUW them under us.
53345   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
53346     SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
53347     return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
53348                        N->getOperand(1), Cond, Flags);
53349   }
53350 
53351   return SDValue();
53352 }
53353 
53354 // TODO: Could we move this to DAGCombine?
53355 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
53356                                                   SelectionDAG &DAG) {
53357   // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
53358   // to optimize away the operation when its operand comes from a constant.
53359   //
53360   // The general transformation is:
53361   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
53362   //       AND(VECTOR_CMP(x,y), constant2)
53363   //    constant2 = UNARYOP(constant)
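  // For example, sitofp(and(cmp, <4 x i32> <7,7,7,7>)) can become
  // and(cmp, bitcast(<4 x float> <7.0,7.0,7.0,7.0>)): each lane of the AND is
  // either 0 or 7, sitofp(7) == 7.0, and sitofp(0) == 0.0 whose bit pattern
  // is all zeros, so the AND can be done after converting the constant.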
53364 
53365   // Early exit if this isn't a vector operation, the operand of the
53366   // unary operation isn't a bitwise AND, or if the sizes of the operations
53367   // aren't the same.
53368   EVT VT = N->getValueType(0);
53369   bool IsStrict = N->isStrictFPOpcode();
53370   unsigned NumEltBits = VT.getScalarSizeInBits();
53371   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53372   if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
53373       DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
53374       VT.getSizeInBits() != Op0.getValueSizeInBits())
53375     return SDValue();
53376 
53377   // Now check that the other operand of the AND is a constant. We could
53378   // make the transformation for non-constant splats as well, but it's unclear
53379   // that would be a benefit as it would not eliminate any operations, just
53380   // perform one more step in scalar code before moving to the vector unit.
53381   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
53382     // Bail out if the vector isn't a constant.
53383     if (!BV->isConstant())
53384       return SDValue();
53385 
53386     // Everything checks out. Build up the new and improved node.
53387     SDLoc DL(N);
53388     EVT IntVT = BV->getValueType(0);
53389     // Create a new constant of the appropriate type for the transformed
53390     // DAG.
53391     SDValue SourceConst;
53392     if (IsStrict)
53393       SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
53394                                 {N->getOperand(0), SDValue(BV, 0)});
53395     else
53396       SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
53397     // The AND node needs bitcasts to/from an integer vector type around it.
53398     SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
53399     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
53400                                  MaskConst);
53401     SDValue Res = DAG.getBitcast(VT, NewAnd);
53402     if (IsStrict)
53403       return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
53404     return Res;
53405   }
53406 
53407   return SDValue();
53408 }
53409 
53410 /// If we are converting a value to floating-point, try to replace scalar
53411 /// truncate of an extracted vector element with a bitcast. This tries to keep
53412 /// the sequence on XMM registers rather than moving between vector and GPRs.
53413 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
53414   // TODO: This is currently only used by combineSIntToFP, but it is generalized
53415   //       to allow being called by any similar cast opcode.
53416   // TODO: Consider merging this into lowering: vectorizeExtractedCast().
53417   SDValue Trunc = N->getOperand(0);
53418   if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
53419     return SDValue();
53420 
53421   SDValue ExtElt = Trunc.getOperand(0);
53422   if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53423       !isNullConstant(ExtElt.getOperand(1)))
53424     return SDValue();
53425 
53426   EVT TruncVT = Trunc.getValueType();
53427   EVT SrcVT = ExtElt.getValueType();
53428   unsigned DestWidth = TruncVT.getSizeInBits();
53429   unsigned SrcWidth = SrcVT.getSizeInBits();
53430   if (SrcWidth % DestWidth != 0)
53431     return SDValue();
53432 
53433   // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
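  // For example, X: v2i64 truncated to i32 becomes (extelt (bitcast X to
  // v4i32), 0); on little-endian x86, element 0 of the v4i32 holds the low 32
  // bits of X's element 0, i.e. exactly the truncated value.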
53434   EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
53435   unsigned VecWidth = SrcVecVT.getSizeInBits();
53436   unsigned NumElts = VecWidth / DestWidth;
53437   EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
53438   SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
53439   SDLoc DL(N);
53440   SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
53441                                   BitcastVec, ExtElt.getOperand(1));
53442   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
53443 }
53444 
53445 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
53446                                const X86Subtarget &Subtarget) {
53447   bool IsStrict = N->isStrictFPOpcode();
53448   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53449   EVT VT = N->getValueType(0);
53450   EVT InVT = Op0.getValueType();
53451 
53452   // Using i16 as an intermediate type is a bad idea, unless we have HW support
53453   // for it. Therefore, for type sizes of 32 bits or smaller, just go with i32.
53454   // if hasFP16 support:
53455   //   UINT_TO_FP(vXi1~15)  -> SINT_TO_FP(ZEXT(vXi1~15  to vXi16))
53456   //   UINT_TO_FP(vXi17~31) -> SINT_TO_FP(ZEXT(vXi17~31 to vXi32))
53457   // else
53458   //   UINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
53459   // UINT_TO_FP(vXi33~63) -> SINT_TO_FP(ZEXT(vXi33~63 to vXi64))
53460   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53461     unsigned ScalarSize = InVT.getScalarSizeInBits();
53462     if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53463         ScalarSize >= 64)
53464       return SDValue();
53465     SDLoc dl(N);
53466     EVT DstVT =
53467         EVT::getVectorVT(*DAG.getContext(),
53468                          (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53469                          : ScalarSize < 32                        ? MVT::i32
53470                                                                   : MVT::i64,
53471                          InVT.getVectorNumElements());
53472     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53473     if (IsStrict)
53474       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53475                          {N->getOperand(0), P});
53476     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53477   }
53478 
53479   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
53480   // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
53481   // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
53482   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53483       VT.getScalarType() != MVT::f16) {
53484     SDLoc dl(N);
53485     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53486     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53487 
53488     // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
53489     if (IsStrict)
53490       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53491                          {N->getOperand(0), P});
53492     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53493   }
53494 
53495   // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
53496   // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
53497   // the optimization here.
53498   if (DAG.SignBitIsZero(Op0)) {
53499     if (IsStrict)
53500       return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
53501                          {N->getOperand(0), Op0});
53502     return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
53503   }
53504 
53505   return SDValue();
53506 }
53507 
53508 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
53509                                TargetLowering::DAGCombinerInfo &DCI,
53510                                const X86Subtarget &Subtarget) {
53511   // First try to optimize away the conversion entirely when it's
53512   // conditionally from a constant. Vectors only.
53513   bool IsStrict = N->isStrictFPOpcode();
53514   if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
53515     return Res;
53516 
53517   // Now move on to more general possibilities.
53518   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53519   EVT VT = N->getValueType(0);
53520   EVT InVT = Op0.getValueType();
53521 
53522   // Using i16 as an intermediate type is a bad idea, unless we have HW support
53523   // for it. Therefore, for type sizes of 32 bits or smaller, just go with i32.
53524   // if hasFP16 support:
53525   //   SINT_TO_FP(vXi1~15)  -> SINT_TO_FP(SEXT(vXi1~15  to vXi16))
53526   //   SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
53527   // else
53528   //   SINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
53529   // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
53530   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53531     unsigned ScalarSize = InVT.getScalarSizeInBits();
53532     if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53533         ScalarSize >= 64)
53534       return SDValue();
53535     SDLoc dl(N);
53536     EVT DstVT =
53537         EVT::getVectorVT(*DAG.getContext(),
53538                          (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53539                          : ScalarSize < 32                        ? MVT::i32
53540                                                                   : MVT::i64,
53541                          InVT.getVectorNumElements());
53542     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53543     if (IsStrict)
53544       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53545                          {N->getOperand(0), P});
53546     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53547   }
53548 
53549   // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
53550   // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
53551   // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
53552   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53553       VT.getScalarType() != MVT::f16) {
53554     SDLoc dl(N);
53555     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53556     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53557     if (IsStrict)
53558       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53559                          {N->getOperand(0), P});
53560     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53561   }
53562 
53563   // Without AVX512DQ we only support i64 to float scalar conversion. For both
53564   // vectors and scalars, see if we know that the upper bits are all the sign
53565   // bit, in which case we can truncate the input to i32 and convert from that.
53566   if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
53567     unsigned BitWidth = InVT.getScalarSizeInBits();
53568     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
53569     if (NumSignBits >= (BitWidth - 31)) {
53570       EVT TruncVT = MVT::i32;
53571       if (InVT.isVector())
53572         TruncVT = InVT.changeVectorElementType(TruncVT);
53573       SDLoc dl(N);
53574       if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
53575         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
53576         if (IsStrict)
53577           return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53578                              {N->getOperand(0), Trunc});
53579         return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
53580       }
53581       // If we're after legalize and the type is v2i32 we need to shuffle and
53582       // use CVTSI2P.
53583       assert(InVT == MVT::v2i64 && "Unexpected VT!");
53584       SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
53585       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
53586                                           { 0, 2, -1, -1 });
53587       if (IsStrict)
53588         return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
53589                            {N->getOperand(0), Shuf});
53590       return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
53591     }
53592   }
53593 
53594   // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
53595   // a 32-bit target where SSE doesn't support i64->FP operations.
53596   if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
53597       Op0.getOpcode() == ISD::LOAD) {
53598     LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
53599 
53600     // This transformation is not supported if the result type is f16 or f128.
53601     if (VT == MVT::f16 || VT == MVT::f128)
53602       return SDValue();
53603 
53604     // If we have AVX512DQ we can use packed conversion instructions unless
53605     // the VT is f80.
53606     if (Subtarget.hasDQI() && VT != MVT::f80)
53607       return SDValue();
53608 
53609     if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
53610         Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
53611       std::pair<SDValue, SDValue> Tmp =
53612           Subtarget.getTargetLowering()->BuildFILD(
53613               VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
53614               Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
53615       DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
53616       return Tmp.first;
53617     }
53618   }
53619 
53620   if (IsStrict)
53621     return SDValue();
53622 
53623   if (SDValue V = combineToFPTruncExtElt(N, DAG))
53624     return V;
53625 
53626   return SDValue();
53627 }
53628 
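// Return true if any user of the EFLAGS result reads a condition code that
// depends on the carry or overflow flag, or if we cannot tell.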
53629 static bool needCarryOrOverflowFlag(SDValue Flags) {
53630   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53631 
53632   for (const SDNode *User : Flags->uses()) {
53633     X86::CondCode CC;
53634     switch (User->getOpcode()) {
53635     default:
53636       // Be conservative.
53637       return true;
53638     case X86ISD::SETCC:
53639     case X86ISD::SETCC_CARRY:
53640       CC = (X86::CondCode)User->getConstantOperandVal(0);
53641       break;
53642     case X86ISD::BRCOND:
53643     case X86ISD::CMOV:
53644       CC = (X86::CondCode)User->getConstantOperandVal(2);
53645       break;
53646     }
53647 
53648     switch (CC) {
53649     default: break;
53650     case X86::COND_A: case X86::COND_AE:
53651     case X86::COND_B: case X86::COND_BE:
53652     case X86::COND_O: case X86::COND_NO:
53653     case X86::COND_G: case X86::COND_GE:
53654     case X86::COND_L: case X86::COND_LE:
53655       return true;
53656     }
53657   }
53658 
53659   return false;
53660 }
53661 
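// Return true if every user of the EFLAGS result only tests for (in)equality
// with zero, i.e. uses X86::COND_E or X86::COND_NE, so only the zero flag
// needs to be preserved.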
53662 static bool onlyZeroFlagUsed(SDValue Flags) {
53663   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53664 
53665   for (const SDNode *User : Flags->uses()) {
53666     unsigned CCOpNo;
53667     switch (User->getOpcode()) {
53668     default:
53669       // Be conservative.
53670       return false;
53671     case X86ISD::SETCC:
53672     case X86ISD::SETCC_CARRY:
53673       CCOpNo = 0;
53674       break;
53675     case X86ISD::BRCOND:
53676     case X86ISD::CMOV:
53677       CCOpNo = 2;
53678       break;
53679     }
53680 
53681     X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
53682     if (CC != X86::COND_E && CC != X86::COND_NE)
53683       return false;
53684   }
53685 
53686   return true;
53687 }
53688 
53689 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
53690                           const X86Subtarget &Subtarget) {
53691   // Only handle test patterns.
53692   if (!isNullConstant(N->getOperand(1)))
53693     return SDValue();
53694 
53695   // If we have a CMP of a truncated binop, see if we can make a smaller binop
53696   // and use its flags directly.
53697   // TODO: Maybe we should try promoting compares that only use the zero flag
53698   // first if we can prove the upper bits with computeKnownBits?
53699   SDLoc dl(N);
53700   SDValue Op = N->getOperand(0);
53701   EVT VT = Op.getValueType();
53702   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53703 
53704   // If we have a constant logical shift that's only used in a comparison
53705   // against zero, turn it into an equivalent AND. This allows turning it into
53706   // a TEST instruction later.
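  // For example, (srl X, 16) == 0 is rewritten as (X & 0xFFFF0000) == 0, and
  // (shl X, 24) == 0 as (X & 0x000000FF) == 0 (for a 32-bit X), both of which
  // isel can select as a TEST against an immediate.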
53707   if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
53708       Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
53709       onlyZeroFlagUsed(SDValue(N, 0))) {
53710     unsigned BitWidth = VT.getSizeInBits();
53711     const APInt &ShAmt = Op.getConstantOperandAPInt(1);
53712     if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
53713       unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
53714       APInt Mask = Op.getOpcode() == ISD::SRL
53715                        ? APInt::getHighBitsSet(BitWidth, MaskBits)
53716                        : APInt::getLowBitsSet(BitWidth, MaskBits);
53717       if (Mask.isSignedIntN(32)) {
53718         Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
53719                          DAG.getConstant(Mask, dl, VT));
53720         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53721                            DAG.getConstant(0, dl, VT));
53722       }
53723     }
53724   }
53725 
53726   // If we're extracting from an AVX512 bool vector and comparing against zero,
53727   // then try to just bitcast the vector to an integer to use TEST/BT directly.
53728   // (and (extract_elt (kshiftr vXi1, C), 0), 1) -> (and (bc vXi1), 1<<C)
53729   if (Op.getOpcode() == ISD::AND && isOneConstant(Op.getOperand(1)) &&
53730       Op.hasOneUse() && onlyZeroFlagUsed(SDValue(N, 0))) {
53731     SDValue Src = Op.getOperand(0);
53732     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
53733         isNullConstant(Src.getOperand(1)) &&
53734         Src.getOperand(0).getValueType().getScalarType() == MVT::i1) {
53735       SDValue BoolVec = Src.getOperand(0);
53736       unsigned ShAmt = 0;
53737       if (BoolVec.getOpcode() == X86ISD::KSHIFTR) {
53738         ShAmt = BoolVec.getConstantOperandVal(1);
53739         BoolVec = BoolVec.getOperand(0);
53740       }
53741       BoolVec = widenMaskVector(BoolVec, false, Subtarget, DAG, dl);
53742       EVT VecVT = BoolVec.getValueType();
53743       unsigned BitWidth = VecVT.getVectorNumElements();
53744       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), BitWidth);
53745       if (TLI.isTypeLegal(VecVT) && TLI.isTypeLegal(BCVT)) {
53746         APInt Mask = APInt::getOneBitSet(BitWidth, ShAmt);
53747         Op = DAG.getBitcast(BCVT, BoolVec);
53748         Op = DAG.getNode(ISD::AND, dl, BCVT, Op,
53749                          DAG.getConstant(Mask, dl, BCVT));
53750         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53751                            DAG.getConstant(0, dl, BCVT));
53752       }
53753     }
53754   }
53755 
53756   // Peek through any zero-extend if we're only testing for a zero result.
53757   if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
53758     SDValue Src = Op.getOperand(0);
53759     EVT SrcVT = Src.getValueType();
53760     if (SrcVT.getScalarSizeInBits() >= 8 && TLI.isTypeLegal(SrcVT))
53761       return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
53762                          DAG.getConstant(0, dl, SrcVT));
53763   }
53764 
53765   // Look for a truncate.
53766   if (Op.getOpcode() != ISD::TRUNCATE)
53767     return SDValue();
53768 
53769   SDValue Trunc = Op;
53770   Op = Op.getOperand(0);
53771 
53772   // See if we can compare with zero against the truncation source,
53773   // which should help using the Z flag from many ops. Only do this for
53774   // an i32 truncated op to prevent partial-reg compares of promoted ops.
53775   EVT OpVT = Op.getValueType();
53776   APInt UpperBits =
53777       APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
53778   if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
53779       onlyZeroFlagUsed(SDValue(N, 0))) {
53780     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53781                        DAG.getConstant(0, dl, OpVT));
53782   }
53783 
53784   // After this the truncate and arithmetic op must have a single use.
53785   if (!Trunc.hasOneUse() || !Op.hasOneUse())
53786       return SDValue();
53787 
53788   unsigned NewOpc;
53789   switch (Op.getOpcode()) {
53790   default: return SDValue();
53791   case ISD::AND:
53792     // Skip AND with a constant. We have special handling for AND with an
53793     // immediate during isel to generate TEST instructions.
53794     if (isa<ConstantSDNode>(Op.getOperand(1)))
53795       return SDValue();
53796     NewOpc = X86ISD::AND;
53797     break;
53798   case ISD::OR:  NewOpc = X86ISD::OR;  break;
53799   case ISD::XOR: NewOpc = X86ISD::XOR; break;
53800   case ISD::ADD:
53801     // If the carry or overflow flag is used, we can't truncate.
53802     if (needCarryOrOverflowFlag(SDValue(N, 0)))
53803       return SDValue();
53804     NewOpc = X86ISD::ADD;
53805     break;
53806   case ISD::SUB:
53807     // If the carry or overflow flag is used, we can't truncate.
53808     if (needCarryOrOverflowFlag(SDValue(N, 0)))
53809       return SDValue();
53810     NewOpc = X86ISD::SUB;
53811     break;
53812   }
53813 
53814   // We found an op we can narrow. Truncate its inputs.
53815   SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
53816   SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
53817 
53818   // Use a X86 specific opcode to avoid DAG combine messing with it.
53819   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53820   Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
53821 
53822   // For AND, keep a CMP so that we can match the test pattern.
53823   if (NewOpc == X86ISD::AND)
53824     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53825                        DAG.getConstant(0, dl, VT));
53826 
53827   // Return the flags.
53828   return Op.getValue(1);
53829 }
53830 
53831 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
53832                                 TargetLowering::DAGCombinerInfo &DCI) {
53833   assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
53834          "Expected X86ISD::ADD or X86ISD::SUB");
53835 
53836   SDLoc DL(N);
53837   SDValue LHS = N->getOperand(0);
53838   SDValue RHS = N->getOperand(1);
53839   MVT VT = LHS.getSimpleValueType();
53840   bool IsSub = X86ISD::SUB == N->getOpcode();
53841   unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
53842 
53843   // If we don't use the flag result, simplify back to a generic ADD/SUB.
53844   if (!N->hasAnyUseOfValue(1)) {
53845     SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
53846     return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
53847   }
53848 
53849   // Fold any similar generic ADD/SUB opcodes to reuse this node.
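  // For example, if both (X86ISD::SUB a, b) and (ISD::SUB a, b) exist, the
  // generic node is replaced with the arithmetic result of this node; a
  // generic (ISD::SUB b, a) is replaced with its negation (0 - result).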
53850   auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
53851     SDValue Ops[] = {N0, N1};
53852     SDVTList VTs = DAG.getVTList(N->getValueType(0));
53853     if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
53854       SDValue Op(N, 0);
53855       if (Negate)
53856         Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
53857       DCI.CombineTo(GenericAddSub, Op);
53858     }
53859   };
53860   MatchGeneric(LHS, RHS, false);
53861   MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
53862 
53863   // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
53864   // EFLAGS result doesn't change.
53865   return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
53866                                    /*ZeroSecondOpOnly*/ true);
53867 }
53868 
53869 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
53870   SDValue LHS = N->getOperand(0);
53871   SDValue RHS = N->getOperand(1);
53872   SDValue BorrowIn = N->getOperand(2);
53873 
53874   if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
53875     MVT VT = N->getSimpleValueType(0);
53876     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53877     return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
53878   }
53879 
53880   // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
53881   // iff the flag result is dead.
53882   if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
53883       !N->hasAnyUseOfValue(1))
53884     return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
53885                        LHS.getOperand(1), BorrowIn);
53886 
53887   return SDValue();
53888 }
53889 
53890 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
53891 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
53892                           TargetLowering::DAGCombinerInfo &DCI) {
53893   SDValue LHS = N->getOperand(0);
53894   SDValue RHS = N->getOperand(1);
53895   SDValue CarryIn = N->getOperand(2);
53896   auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
53897   auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
53898 
53899   // Canonicalize constant to RHS.
53900   if (LHSC && !RHSC)
53901     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
53902                        CarryIn);
53903 
53904   // If the LHS and RHS of the ADC node are zero, then it can't overflow and
53905   // the result is either zero or one (depending on the input carry bit).
53906   // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
53907   if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
53908       // We don't have a good way to replace an EFLAGS use, so only do this when
53909       // dead right now.
53910       SDValue(N, 1).use_empty()) {
53911     SDLoc DL(N);
53912     EVT VT = N->getValueType(0);
53913     SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
53914     SDValue Res1 = DAG.getNode(
53915         ISD::AND, DL, VT,
53916         DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
53917                     DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
53918         DAG.getConstant(1, DL, VT));
53919     return DCI.CombineTo(N, Res1, CarryOut);
53920   }
53921 
53922   // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
53923   // iff the flag result is dead.
53924   // TODO: Allow flag result if C1+C2 doesn't overflow (signed or unsigned).
53925   if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
53926     SDLoc DL(N);
53927     APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
53928     return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
53929                        DAG.getConstant(0, DL, LHS.getValueType()),
53930                        DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
53931   }
53932 
53933   if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
53934     MVT VT = N->getSimpleValueType(0);
53935     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53936     return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
53937   }
53938 
53939   // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
53940   // iff the flag result is dead.
53941   if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
53942       !N->hasAnyUseOfValue(1))
53943     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
53944                        LHS.getOperand(1), CarryIn);
53945 
53946   return SDValue();
53947 }
53948 
53949 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
53950                             const SDLoc &DL, EVT VT,
53951                             const X86Subtarget &Subtarget) {
53952   // Example of the pattern we try to detect:
53953   // t := (v8i32 mul (sext (v8i16 x0)), (sext (v8i16 x1)))
53954   // (add (build_vector (extract_elt t, 0),
53955   //                    (extract_elt t, 2),
53956   //                    (extract_elt t, 4),
53957   //                    (extract_elt t, 6)),
53958   //      (build_vector (extract_elt t, 1),
53959   //                    (extract_elt t, 3),
53960   //                    (extract_elt t, 5),
53961   //                    (extract_elt t, 7)))
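  // For reference, this is exactly the PMADDWD semantics: each i32 result lane
  // is x0[2*i]*x1[2*i] + x0[2*i+1]*x1[2*i+1], with the i16 inputs sign-extended
  // and the two products summed into one i32.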
53962 
53963   if (!Subtarget.hasSSE2())
53964     return SDValue();
53965 
53966   if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
53967       Op1.getOpcode() != ISD::BUILD_VECTOR)
53968     return SDValue();
53969 
53970   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
53971       VT.getVectorNumElements() < 4 ||
53972       !isPowerOf2_32(VT.getVectorNumElements()))
53973     return SDValue();
53974 
53975   // Check if one of Op0,Op1 is of the form:
53976   // (build_vector (extract_elt Mul, 0),
53977   //               (extract_elt Mul, 2),
53978   //               (extract_elt Mul, 4),
53979   //                   ...
53980   // the other is of the form:
53981   // (build_vector (extract_elt Mul, 1),
53982   //               (extract_elt Mul, 3),
53983   //               (extract_elt Mul, 5),
53984   //                   ...
53985   // and identify Mul.
53986   SDValue Mul;
53987   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
53988     SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
53989             Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
53990     // TODO: Be more tolerant to undefs.
53991     if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53992         Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53993         Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53994         Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
53995       return SDValue();
53996     auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
53997     auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
53998     auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
53999     auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
54000     if (!Const0L || !Const1L || !Const0H || !Const1H)
54001       return SDValue();
54002     unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
54003              Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
54004     // Commutativity of mul allows factors of a product to reorder.
54005     if (Idx0L > Idx1L)
54006       std::swap(Idx0L, Idx1L);
54007     if (Idx0H > Idx1H)
54008       std::swap(Idx0H, Idx1H);
54009     // Commutativity of add allows pairs of factors to reorder.
54010     if (Idx0L > Idx0H) {
54011       std::swap(Idx0L, Idx0H);
54012       std::swap(Idx1L, Idx1H);
54013     }
54014     if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
54015         Idx1H != 2 * i + 3)
54016       return SDValue();
54017     if (!Mul) {
54018       // First time an extract_elt's source vector is visited. Must be a MUL
54019       // with twice as many vector elements as the BUILD_VECTOR.
54020       // Both extracts must be from the same MUL.
54021       Mul = Op0L->getOperand(0);
54022       if (Mul->getOpcode() != ISD::MUL ||
54023           Mul.getValueType().getVectorNumElements() != 2 * e)
54024         return SDValue();
54025     }
54026     // Check that the extract is from the same MUL previously seen.
54027     if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
54028         Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
54029       return SDValue();
54030   }
54031 
54032   // Check if the Mul source can be safely shrunk.
54033   ShrinkMode Mode;
54034   if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
54035       Mode == ShrinkMode::MULU16)
54036     return SDValue();
54037 
54038   EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54039                                  VT.getVectorNumElements() * 2);
54040   SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
54041   SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
54042 
54043   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54044                          ArrayRef<SDValue> Ops) {
54045     EVT InVT = Ops[0].getValueType();
54046     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
54047     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54048                                  InVT.getVectorNumElements() / 2);
54049     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54050   };
54051   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
54052 }
54053 
54054 // Attempt to turn this pattern into PMADDWD.
54055 // (add (mul (sext (build_vector)), (sext (build_vector))),
54056 //      (mul (sext (build_vector)), (sext (build_vector))))
54057 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
54058                               const SDLoc &DL, EVT VT,
54059                               const X86Subtarget &Subtarget) {
54060   if (!Subtarget.hasSSE2())
54061     return SDValue();
54062 
54063   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
54064     return SDValue();
54065 
54066   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54067       VT.getVectorNumElements() < 4 ||
54068       !isPowerOf2_32(VT.getVectorNumElements()))
54069     return SDValue();
54070 
54071   SDValue N00 = N0.getOperand(0);
54072   SDValue N01 = N0.getOperand(1);
54073   SDValue N10 = N1.getOperand(0);
54074   SDValue N11 = N1.getOperand(1);
54075 
54076   // All inputs need to be sign extends.
54077   // TODO: Support ZERO_EXTEND from known positive?
54078   if (N00.getOpcode() != ISD::SIGN_EXTEND ||
54079       N01.getOpcode() != ISD::SIGN_EXTEND ||
54080       N10.getOpcode() != ISD::SIGN_EXTEND ||
54081       N11.getOpcode() != ISD::SIGN_EXTEND)
54082     return SDValue();
54083 
54084   // Peek through the extends.
54085   N00 = N00.getOperand(0);
54086   N01 = N01.getOperand(0);
54087   N10 = N10.getOperand(0);
54088   N11 = N11.getOperand(0);
54089 
54090   // Must be extending from vXi16.
54091   EVT InVT = N00.getValueType();
54092   if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
54093       N10.getValueType() != InVT || N11.getValueType() != InVT)
54094     return SDValue();
54095 
54096   // All inputs should be build_vectors.
54097   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
54098       N01.getOpcode() != ISD::BUILD_VECTOR ||
54099       N10.getOpcode() != ISD::BUILD_VECTOR ||
54100       N11.getOpcode() != ISD::BUILD_VECTOR)
54101     return SDValue();
54102 
54103   // For each result element, we need the even element from one vector to be
54104   // multiplied by the even element of the other vector, and the odd element
54105   // from the first vector to be multiplied by the odd element of the other
54106   // vector. So for each element i we need to make sure this operation is
54107   // being performed:
54108   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
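  // Concrete illustration: for i == 0 the summed pair must be
  //   A[0] * B[0] + A[1] * B[1],
  // which is exactly the first i32 lane that pmaddwd(A, B) produces.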
54109   SDValue In0, In1;
54110   for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
54111     SDValue N00Elt = N00.getOperand(i);
54112     SDValue N01Elt = N01.getOperand(i);
54113     SDValue N10Elt = N10.getOperand(i);
54114     SDValue N11Elt = N11.getOperand(i);
54115     // TODO: Be more tolerant to undefs.
54116     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54117         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54118         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54119         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54120       return SDValue();
54121     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
54122     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
54123     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
54124     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
54125     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
54126       return SDValue();
54127     unsigned IdxN00 = ConstN00Elt->getZExtValue();
54128     unsigned IdxN01 = ConstN01Elt->getZExtValue();
54129     unsigned IdxN10 = ConstN10Elt->getZExtValue();
54130     unsigned IdxN11 = ConstN11Elt->getZExtValue();
54131     // Add is commutative so indices can be reordered.
54132     if (IdxN00 > IdxN10) {
54133       std::swap(IdxN00, IdxN10);
54134       std::swap(IdxN01, IdxN11);
54135     }
54136     // N0 indices must be the even element, N1 indices the next odd element.
54137     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
54138         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
54139       return SDValue();
54140     SDValue N00In = N00Elt.getOperand(0);
54141     SDValue N01In = N01Elt.getOperand(0);
54142     SDValue N10In = N10Elt.getOperand(0);
54143     SDValue N11In = N11Elt.getOperand(0);
54144 
54145     // First time we find an input capture it.
54146     if (!In0) {
54147       In0 = N00In;
54148       In1 = N01In;
54149 
54150       // The input vectors must be at least as wide as the output.
54151       // If they are larger than the output, we extract a subvector below.
54152       if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
54153           In1.getValueSizeInBits() < VT.getSizeInBits())
54154         return SDValue();
54155     }
54156     // Mul is commutative so the input vectors can be in any order.
54157     // Canonicalize to make the compares easier.
54158     if (In0 != N00In)
54159       std::swap(N00In, N01In);
54160     if (In0 != N10In)
54161       std::swap(N10In, N11In);
54162     if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
54163       return SDValue();
54164   }
54165 
54166   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54167                          ArrayRef<SDValue> Ops) {
54168     EVT OpVT = Ops[0].getValueType();
54169     assert(OpVT.getScalarType() == MVT::i16 &&
54170            "Unexpected scalar element type");
54171     assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
54172     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54173                                  OpVT.getVectorNumElements() / 2);
54174     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54175   };
54176 
54177   // If the output is narrower than an input, extract the low part of the input
54178   // vector.
54179   EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54180                                VT.getVectorNumElements() * 2);
54181   if (OutVT16.bitsLT(In0.getValueType())) {
54182     In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
54183                       DAG.getIntPtrConstant(0, DL));
54184   }
54185   if (OutVT16.bitsLT(In1.getValueType())) {
54186     In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
54187                       DAG.getIntPtrConstant(0, DL));
54188   }
54189   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
54190                           PMADDBuilder);
54191 }
54192 
54193 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
54194 // If the upper element in each pair of both VPMADDWD nodes is zero then we can
54195 // merge the operand elements and use the implicit add of VPMADDWD.
54196 // TODO: Add support for VPMADDUBSW (which isn't commutable).
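// Illustrative sketch of why this is valid: if the upper element of each
// 16-bit pair in X (or in Y) is zero, then VPMADDWD(X,Y)[i] degenerates to
// X[2*i]*Y[2*i] with no cross-pair add. Interleaving the used pair elements of
// the two degenerate VPMADDWDs lets a single VPMADDWD's implicit pairwise add
// perform the outer ADD.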
54197 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
54198                                    const SDLoc &DL, EVT VT) {
54199   if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
54200     return SDValue();
54201 
54202   // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
54203   if (VT.getSizeInBits() > 128)
54204     return SDValue();
54205 
54206   unsigned NumElts = VT.getVectorNumElements();
54207   MVT OpVT = N0.getOperand(0).getSimpleValueType();
54208   APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
54209   APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
54210 
54211   bool Op0HiZero =
54212       DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
54213       DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
54214   bool Op1HiZero =
54215       DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
54216       DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
54217 
54218   // TODO: Check for zero lower elements once we have actual codegen that
54219   // creates them.
54220   if (!Op0HiZero || !Op1HiZero)
54221     return SDValue();
54222 
54223   // Create a shuffle mask packing the lower elements from each VPMADDWD.
54224   SmallVector<int> Mask;
54225   for (int i = 0; i != (int)NumElts; ++i) {
54226     Mask.push_back(2 * i);
54227     Mask.push_back(2 * (i + NumElts));
54228   }
54229 
54230   SDValue LHS =
54231       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
54232   SDValue RHS =
54233       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
54234   return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
54235 }
54236 
54237 /// CMOV of constants requires materializing constant operands in registers.
54238 /// Try to fold those constants into an 'add' instruction to reduce instruction
54239 /// count. We do this with CMOV rather than the generic 'select' because there
54240 /// are earlier folds that may turn select-of-constants into logic hacks.
54241 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
54242                                        const X86Subtarget &Subtarget) {
54243   // If an operand is zero, add-of-0 gets simplified away, so that's clearly
54244   // better because we eliminate 1-2 instructions. This transform is still
54245   // an improvement without zero operands because we trade 2 move constants and
54246   // 1 add for 2 adds (LEA) as long as the constants can be represented as
54247   // immediate asm operands (fit in 32-bits).
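  // For example (constants chosen for illustration):
  //   add (cmov 0, 42), x --> cmov x, (add x, 42)
  // The add-of-0 arm folds away, removing a constant materialization.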
54248   auto isSuitableCmov = [](SDValue V) {
54249     if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
54250       return false;
54251     if (!isa<ConstantSDNode>(V.getOperand(0)) ||
54252         !isa<ConstantSDNode>(V.getOperand(1)))
54253       return false;
54254     return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
54255            (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
54256             V.getConstantOperandAPInt(1).isSignedIntN(32));
54257   };
54258 
54259   // Match an appropriate CMOV as the first operand of the add.
54260   SDValue Cmov = N->getOperand(0);
54261   SDValue OtherOp = N->getOperand(1);
54262   if (!isSuitableCmov(Cmov))
54263     std::swap(Cmov, OtherOp);
54264   if (!isSuitableCmov(Cmov))
54265     return SDValue();
54266 
54267   // Don't remove a load folding opportunity for the add. That would neutralize
54268   // any improvements from removing constant materializations.
54269   if (X86::mayFoldLoad(OtherOp, Subtarget))
54270     return SDValue();
54271 
54272   EVT VT = N->getValueType(0);
54273   SDLoc DL(N);
54274   SDValue FalseOp = Cmov.getOperand(0);
54275   SDValue TrueOp = Cmov.getOperand(1);
54276 
54277   // We will push the add through the select, but we can potentially do better
54278   // if we know there is another add in the sequence and this is pointer math.
54279   // In that case, we can absorb an add into the trailing memory op and avoid
54280   // a 3-operand LEA which is likely slower than a 2-operand LEA.
54281   // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
54282   if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
54283       !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
54284       all_of(N->uses(), [&](SDNode *Use) {
54285         auto *MemNode = dyn_cast<MemSDNode>(Use);
54286         return MemNode && MemNode->getBasePtr().getNode() == N;
54287       })) {
54288     // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
54289     // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
54290     //       it is possible that choosing op1 might be better.
54291     SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
54292     FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
54293     TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
54294     Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
54295                        Cmov.getOperand(2), Cmov.getOperand(3));
54296     return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
54297   }
54298 
54299   // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
54300   FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
54301   TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
54302   return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
54303                      Cmov.getOperand(3));
54304 }
54305 
54306 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
54307                           TargetLowering::DAGCombinerInfo &DCI,
54308                           const X86Subtarget &Subtarget) {
54309   EVT VT = N->getValueType(0);
54310   SDValue Op0 = N->getOperand(0);
54311   SDValue Op1 = N->getOperand(1);
54312   SDLoc DL(N);
54313 
54314   if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
54315     return Select;
54316 
54317   if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
54318     return MAdd;
54319   if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
54320     return MAdd;
54321   if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
54322     return MAdd;
54323 
54324   // Try to synthesize horizontal adds from adds of shuffles.
54325   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54326     return V;
54327 
54328   // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
54329   // (sub Y, (sext (vXi1 X))).
54330   // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
54331   // generic DAG combine without a legal type check, but adding this there
54332   // caused regressions.
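  // Worked identity: for a boolean lane b, zext(b) is 0 or 1 and sext(b) is
  // 0 or -1, so zext(b) == -sext(b) and add(zext(b), Y) == sub(Y, sext(b)).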
54333   if (VT.isVector()) {
54334     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54335     if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
54336         Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54337         TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
54338       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
54339       return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
54340     }
54341 
54342     if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
54343         Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54344         TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
54345       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
54346       return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
54347     }
54348   }
54349 
54350   // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
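  // Illustrative reasoning: ADC(Y,0,W) is Y + 0 + W, so adding X yields
  // X + Y + W == ADC(X,Y,W); only the intermediate EFLAGS differ, which the
  // assert below verifies are unused.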
54351   if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
54352       X86::isZeroNode(Op0.getOperand(1))) {
54353     assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
54354     return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
54355                        Op0.getOperand(0), Op0.getOperand(2));
54356   }
54357 
54358   return combineAddOrSubToADCOrSBB(N, DAG);
54359 }
54360 
54361 // Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
54362 // condition comes from the subtract node that produced -X. This matches the
54363 // cmov expansion for absolute value. By swapping the operands we convert abs
54364 // to nabs.
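// Illustrative reasoning: the CMOV arms are X and -X (i.e. +/-abs(X)), so
// swapping them negates the selected value. Y - cmov(F, T) therefore becomes
// Y + cmov(T, F) with no extra negate, since -X already exists from the
// subtract that produced the condition.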
54365 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
54366   SDValue N0 = N->getOperand(0);
54367   SDValue N1 = N->getOperand(1);
54368 
54369   if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
54370     return SDValue();
54371 
54372   X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
54373   if (CC != X86::COND_S && CC != X86::COND_NS)
54374     return SDValue();
54375 
54376   // Condition should come from a negate operation.
54377   SDValue Cond = N1.getOperand(3);
54378   if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
54379     return SDValue();
54380   assert(Cond.getResNo() == 1 && "Unexpected result number");
54381 
54382   // Get the X and -X from the negate.
54383   SDValue NegX = Cond.getValue(0);
54384   SDValue X = Cond.getOperand(1);
54385 
54386   SDValue FalseOp = N1.getOperand(0);
54387   SDValue TrueOp = N1.getOperand(1);
54388 
54389   // Cmov operands should be X and NegX. Order doesn't matter.
54390   if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
54391     return SDValue();
54392 
54393   // Build a new CMOV with the operands swapped.
54394   SDLoc DL(N);
54395   MVT VT = N->getSimpleValueType(0);
54396   SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
54397                              N1.getOperand(2), Cond);
54398   // Convert sub to add.
54399   return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
54400 }
54401 
54402 static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {
54403   SDValue Op0 = N->getOperand(0);
54404   SDValue Op1 = N->getOperand(1);
54405 
54406   // (sub C (zero_extend (setcc)))
54407   // =>
54408   // (add (zero_extend (setcc inverted)) C-1)   if C is a nonzero immediate
54409   // Don't disturb (sub 0 setcc), which is easily done with neg.
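  // Sanity check of the identity: if the setcc is 1 the LHS is C - 1 and the
  // inverted setcc is 0, giving 0 + (C - 1); if the setcc is 0 the LHS is C
  // and the inverted setcc is 1, giving 1 + (C - 1). Both sides agree.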
54410   EVT VT = N->getValueType(0);
54411   auto *Op0C = dyn_cast<ConstantSDNode>(Op0);
54412   if (Op1.getOpcode() == ISD::ZERO_EXTEND && Op1.hasOneUse() && Op0C &&
54413       !Op0C->isZero() && Op1.getOperand(0).getOpcode() == X86ISD::SETCC &&
54414       Op1.getOperand(0).hasOneUse()) {
54415     SDValue SetCC = Op1.getOperand(0);
54416     X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
54417     X86::CondCode NewCC = X86::GetOppositeBranchCondition(CC);
54418     APInt NewImm = Op0C->getAPIntValue() - 1;
54419     SDLoc DL(Op1);
54420     SDValue NewSetCC = getSETCC(NewCC, SetCC.getOperand(1), DL, DAG);
54421     NewSetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NewSetCC);
54422     return DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(VT, VT), NewSetCC,
54423                        DAG.getConstant(NewImm, DL, VT));
54424   }
54425 
54426   return SDValue();
54427 }
54428 
54429 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
54430                           TargetLowering::DAGCombinerInfo &DCI,
54431                           const X86Subtarget &Subtarget) {
54432   SDValue Op0 = N->getOperand(0);
54433   SDValue Op1 = N->getOperand(1);
54434 
54435   // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
54436   auto IsNonOpaqueConstant = [&](SDValue Op) {
54437     if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
54438       if (auto *Cst = dyn_cast<ConstantSDNode>(C))
54439         return !Cst->isOpaque();
54440       return true;
54441     }
54442     return false;
54443   };
54444 
54445   // X86 can't encode an immediate LHS of a sub. See if we can push the
54446   // negation into a preceding instruction. If the RHS of the sub is an XOR with
54447   // one use and a constant, invert the immediate, saving one register.
54448   // However, ignore cases where C1 is 0, as those will become a NEG.
54449   // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
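  // Derivation sketch (two's complement): -xor(X, C2) == ~xor(X, C2) + 1
  // == xor(X, ~C2) + 1, hence C1 - xor(X, C2) == xor(X, ~C2) + (C1 + 1).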
54450   if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
54451       !isNullConstant(Op0) && IsNonOpaqueConstant(Op1.getOperand(1)) &&
54452       Op1->hasOneUse()) {
54453     SDLoc DL(N);
54454     EVT VT = Op0.getValueType();
54455     SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
54456                                  DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
54457     SDValue NewAdd =
54458         DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
54459     return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
54460   }
54461 
54462   if (SDValue V = combineSubABS(N, DAG))
54463     return V;
54464 
54465   // Try to synthesize horizontal subs from subs of shuffles.
54466   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54467     return V;
54468 
54469   // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
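  // Illustrative reasoning: ADC(Y,0,W) is Y + W, so X - (Y + W) == X - Y - W,
  // i.e. SBB(X,Y,W); the flag output must again be unused (asserted below).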
54470   if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
54471       X86::isZeroNode(Op1.getOperand(1))) {
54472     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54473     return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
54474                        Op1.getOperand(0), Op1.getOperand(2));
54475   }
54476 
54477   // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
54478   // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
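  // Derivation sketch: X - SBB(Y,Z,W) == X - (Y - Z - W)
  //                                   == (X + Z + W) - Y == SUB(ADC(X,Z,W), Y).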
54479   if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
54480       !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
54481     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54482     SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
54483                               Op1.getOperand(1), Op1.getOperand(2));
54484     return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
54485                        Op1.getOperand(0));
54486   }
54487 
54488   if (SDValue V = combineXorSubCTLZ(N, DAG, Subtarget))
54489     return V;
54490 
54491   if (SDValue V = combineAddOrSubToADCOrSBB(N, DAG))
54492     return V;
54493 
54494   return combineSubSetcc(N, DAG);
54495 }
54496 
54497 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
54498                                     const X86Subtarget &Subtarget) {
54499   MVT VT = N->getSimpleValueType(0);
54500   SDLoc DL(N);
54501 
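  // A vector compared against itself folds to a constant: every lane of
  // x == x is all-ones and every lane of x > x is zero.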
54502   if (N->getOperand(0) == N->getOperand(1)) {
54503     if (N->getOpcode() == X86ISD::PCMPEQ)
54504       return DAG.getConstant(-1, DL, VT);
54505     if (N->getOpcode() == X86ISD::PCMPGT)
54506       return DAG.getConstant(0, DL, VT);
54507   }
54508 
54509   return SDValue();
54510 }
54511 
54512 /// Helper that combines an array of subvector ops as if they were the operands
54513 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
54514 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
54515 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
54516                                       ArrayRef<SDValue> Ops, SelectionDAG &DAG,
54517                                       TargetLowering::DAGCombinerInfo &DCI,
54518                                       const X86Subtarget &Subtarget) {
54519   assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
54520   unsigned EltSizeInBits = VT.getScalarSizeInBits();
54521 
54522   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
54523     return DAG.getUNDEF(VT);
54524 
54525   if (llvm::all_of(Ops, [](SDValue Op) {
54526         return ISD::isBuildVectorAllZeros(Op.getNode());
54527       }))
54528     return getZeroVector(VT, Subtarget, DAG, DL);
54529 
54530   SDValue Op0 = Ops[0];
54531   bool IsSplat = llvm::all_equal(Ops);
54532   unsigned NumOps = Ops.size();
54533   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54534   LLVMContext &Ctx = *DAG.getContext();
54535 
54536   // Repeated subvectors.
54537   if (IsSplat &&
54538       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
54539     // If this broadcast is inserted into both halves, use a larger broadcast.
54540     if (Op0.getOpcode() == X86ISD::VBROADCAST)
54541       return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
54542 
54543     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
54544     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
54545         (Subtarget.hasAVX2() ||
54546          X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
54547                                               VT.getScalarType(), Subtarget)))
54548       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
54549                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
54550                                      Op0.getOperand(0),
54551                                      DAG.getIntPtrConstant(0, DL)));
54552 
54553     // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
54554     if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
54555         (Subtarget.hasAVX2() ||
54556          (EltSizeInBits >= 32 &&
54557           X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
54558         Op0.getOperand(0).getValueType() == VT.getScalarType())
54559       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
54560 
54561     // concat_vectors(extract_subvector(broadcast(x)),
54562     //                extract_subvector(broadcast(x))) -> broadcast(x)
54563     // concat_vectors(extract_subvector(subv_broadcast(x)),
54564     //                extract_subvector(subv_broadcast(x))) -> subv_broadcast(x)
54565     if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54566         Op0.getOperand(0).getValueType() == VT) {
54567       SDValue SrcVec = Op0.getOperand(0);
54568       if (SrcVec.getOpcode() == X86ISD::VBROADCAST ||
54569           SrcVec.getOpcode() == X86ISD::VBROADCAST_LOAD)
54570         return Op0.getOperand(0);
54571       if (SrcVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
54572           Op0.getValueType() == cast<MemSDNode>(SrcVec)->getMemoryVT())
54573         return Op0.getOperand(0);
54574     }
54575   }
54576 
54577   // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
54578   // Only handle concats of subvector high halves, which vperm2x128 is best at.
54579   // TODO: This should go in combineX86ShufflesRecursively eventually.
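  // Illustrative immediate: 0x31 below selects the high 128-bit half of the
  // first source for the result's low lane (field 1) and the high half of the
  // second source for the high lane (field 3), i.e. concat(hi(v0), hi(v1)).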
54580   if (VT.is256BitVector() && NumOps == 2) {
54581     SDValue Src0 = peekThroughBitcasts(Ops[0]);
54582     SDValue Src1 = peekThroughBitcasts(Ops[1]);
54583     if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54584         Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
54585       EVT SrcVT0 = Src0.getOperand(0).getValueType();
54586       EVT SrcVT1 = Src1.getOperand(0).getValueType();
54587       unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
54588       unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
54589       if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
54590           Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
54591           Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
54592         return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
54593                            DAG.getBitcast(VT, Src0.getOperand(0)),
54594                            DAG.getBitcast(VT, Src1.getOperand(0)),
54595                            DAG.getTargetConstant(0x31, DL, MVT::i8));
54596       }
54597     }
54598   }
54599 
54600   // Repeated opcode.
54601   // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
54602   // but it currently struggles with different vector widths.
54603   if (llvm::all_of(Ops, [Op0](SDValue Op) {
54604         return Op.getOpcode() == Op0.getOpcode() && Op.hasOneUse();
54605       })) {
54606     auto ConcatSubOperand = [&](EVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
54607       SmallVector<SDValue> Subs;
54608       for (SDValue SubOp : SubOps)
54609         Subs.push_back(SubOp.getOperand(I));
54610       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
54611     };
54612     auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
54613       bool AllConstants = true;
54614       bool AllSubVectors = true;
54615       for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
54616         SDValue Sub = SubOps[I].getOperand(Op);
54617         unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
54618         SDValue BC = peekThroughBitcasts(Sub);
54619         AllConstants &= ISD::isBuildVectorOfConstantSDNodes(BC.getNode()) ||
54620                         ISD::isBuildVectorOfConstantFPSDNodes(BC.getNode());
54621         AllSubVectors &= Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54622                          Sub.getOperand(0).getValueType() == VT &&
54623                          Sub.getConstantOperandAPInt(1) == (I * NumSubElts);
54624       }
54625       return AllConstants || AllSubVectors;
54626     };
54627 
54628     switch (Op0.getOpcode()) {
54629     case X86ISD::VBROADCAST: {
54630       if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
54631             return Op.getOperand(0).getValueType().is128BitVector();
54632           })) {
54633         if (VT == MVT::v4f64 || VT == MVT::v4i64)
54634           return DAG.getNode(X86ISD::UNPCKL, DL, VT,
54635                              ConcatSubOperand(VT, Ops, 0),
54636                              ConcatSubOperand(VT, Ops, 0));
54637         // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
54638         if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
54639           return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
54640                                               : X86ISD::PSHUFD,
54641                              DL, VT, ConcatSubOperand(VT, Ops, 0),
54642                              getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
54643       }
54644       break;
54645     }
54646     case X86ISD::MOVDDUP:
54647     case X86ISD::MOVSHDUP:
54648     case X86ISD::MOVSLDUP: {
54649       if (!IsSplat)
54650         return DAG.getNode(Op0.getOpcode(), DL, VT,
54651                            ConcatSubOperand(VT, Ops, 0));
54652       break;
54653     }
54654     case X86ISD::SHUFP: {
54655       // Add SHUFPD support if/when necessary.
54656       if (!IsSplat && VT.getScalarType() == MVT::f32 &&
54657           llvm::all_of(Ops, [Op0](SDValue Op) {
54658             return Op.getOperand(2) == Op0.getOperand(2);
54659           })) {
54660         return DAG.getNode(Op0.getOpcode(), DL, VT,
54661                            ConcatSubOperand(VT, Ops, 0),
54662                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
54663       }
54664       break;
54665     }
54666     case X86ISD::UNPCKH:
54667     case X86ISD::UNPCKL: {
54668       // Don't concatenate build_vector patterns.
54669       if (!IsSplat && EltSizeInBits >= 32 &&
54670           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54671            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54672           none_of(Ops, [](SDValue Op) {
54673             return peekThroughBitcasts(Op.getOperand(0)).getOpcode() ==
54674                        ISD::SCALAR_TO_VECTOR ||
54675                    peekThroughBitcasts(Op.getOperand(1)).getOpcode() ==
54676                        ISD::SCALAR_TO_VECTOR;
54677           })) {
54678         return DAG.getNode(Op0.getOpcode(), DL, VT,
54679                            ConcatSubOperand(VT, Ops, 0),
54680                            ConcatSubOperand(VT, Ops, 1));
54681       }
54682       break;
54683     }
54684     case X86ISD::PSHUFHW:
54685     case X86ISD::PSHUFLW:
54686     case X86ISD::PSHUFD:
54687       if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
54688           Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
54689         return DAG.getNode(Op0.getOpcode(), DL, VT,
54690                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54691       }
54692       [[fallthrough]];
54693     case X86ISD::VPERMILPI:
54694       if (!IsSplat && EltSizeInBits == 32 &&
54695           (VT.is256BitVector() ||
54696            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54697           all_of(Ops, [&Op0](SDValue Op) {
54698             return Op0.getOperand(1) == Op.getOperand(1);
54699           })) {
54700         MVT FloatVT = VT.changeVectorElementType(MVT::f32);
54701         SDValue Res = DAG.getBitcast(FloatVT, ConcatSubOperand(VT, Ops, 0));
54702         Res =
54703             DAG.getNode(X86ISD::VPERMILPI, DL, FloatVT, Res, Op0.getOperand(1));
54704         return DAG.getBitcast(VT, Res);
54705       }
54706       if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
54707         uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
54708         uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
54709         uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
54710         return DAG.getNode(Op0.getOpcode(), DL, VT,
54711                            ConcatSubOperand(VT, Ops, 0),
54712                            DAG.getTargetConstant(Idx, DL, MVT::i8));
54713       }
54714       break;
54715     case X86ISD::PSHUFB:
54716     case X86ISD::PSADBW:
54717       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54718                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
54719         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54720         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54721                                  NumOps * SrcVT.getVectorNumElements());
54722         return DAG.getNode(Op0.getOpcode(), DL, VT,
54723                            ConcatSubOperand(SrcVT, Ops, 0),
54724                            ConcatSubOperand(SrcVT, Ops, 1));
54725       }
54726       break;
54727     case X86ISD::VPERMV:
54728       if (!IsSplat && NumOps == 2 &&
54729           (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
54730         MVT OpVT = Op0.getSimpleValueType();
54731         int NumSrcElts = OpVT.getVectorNumElements();
54732         SmallVector<int, 64> ConcatMask;
54733         for (unsigned i = 0; i != NumOps; ++i) {
54734           SmallVector<int, 64> SubMask;
54735           SmallVector<SDValue, 2> SubOps;
54736           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54737                                     SubMask))
54738             break;
54739           for (int M : SubMask) {
54740             if (0 <= M)
54741               M += i * NumSrcElts;
54742             ConcatMask.push_back(M);
54743           }
54744         }
54745         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54746           SDValue Src = concatSubVectors(Ops[0].getOperand(1),
54747                                          Ops[1].getOperand(1), DAG, DL);
54748           MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54749           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54750           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54751           return DAG.getNode(X86ISD::VPERMV, DL, VT, Mask, Src);
54752         }
54753       }
54754       break;
54755     case X86ISD::VPERMV3:
54756       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54757         MVT OpVT = Op0.getSimpleValueType();
54758         int NumSrcElts = OpVT.getVectorNumElements();
54759         SmallVector<int, 64> ConcatMask;
54760         for (unsigned i = 0; i != NumOps; ++i) {
54761           SmallVector<int, 64> SubMask;
54762           SmallVector<SDValue, 2> SubOps;
54763           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54764                                     SubMask))
54765             break;
54766           for (int M : SubMask) {
54767             if (0 <= M) {
54768               M += M < NumSrcElts ? 0 : NumSrcElts;
54769               M += i * NumSrcElts;
54770             }
54771             ConcatMask.push_back(M);
54772           }
54773         }
54774         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54775           SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
54776                                           Ops[1].getOperand(0), DAG, DL);
54777           SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
54778                                           Ops[1].getOperand(2), DAG, DL);
54779           MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54780           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54781           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54782           return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
54783         }
54784       }
54785       break;
54786     case X86ISD::VPERM2X128: {
54787       if (!IsSplat && VT.is512BitVector() && Subtarget.useAVX512Regs()) {
54788         assert(NumOps == 2 && "Bad concat_vectors operands");
54789         unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54790         unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54791         // TODO: Handle zero'd subvectors.
54792         if ((Imm0 & 0x88) == 0 && (Imm1 & 0x88) == 0) {
54793           int Mask[4] = {(int)(Imm0 & 0x03), (int)((Imm0 >> 4) & 0x3),
54794                          (int)(Imm1 & 0x03), (int)((Imm1 >> 4) & 0x3)};
54795           MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
54796           SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54797                                          Ops[0].getOperand(1), DAG, DL);
54798           SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54799                                          Ops[1].getOperand(1), DAG, DL);
54800           SDValue Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
54801                                     DAG.getBitcast(ShuffleVT, LHS),
54802                                     DAG.getBitcast(ShuffleVT, RHS),
54803                                     getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
54804           return DAG.getBitcast(VT, Res);
54805         }
54806       }
54807       break;
54808     }
54809     case X86ISD::SHUF128: {
54810       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54811         unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54812         unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54813         unsigned Imm = ((Imm0 & 1) << 0) | ((Imm0 & 2) << 1) | 0x08 |
54814                        ((Imm1 & 1) << 4) | ((Imm1 & 2) << 5) | 0x80;
54815         SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54816                                        Ops[0].getOperand(1), DAG, DL);
54817         SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54818                                        Ops[1].getOperand(1), DAG, DL);
54819         return DAG.getNode(X86ISD::SHUF128, DL, VT, LHS, RHS,
54820                            DAG.getTargetConstant(Imm, DL, MVT::i8));
54821       }
54822       break;
54823     }
54824     case ISD::TRUNCATE:
54825       if (!IsSplat && NumOps == 2 && VT.is256BitVector()) {
54826         EVT SrcVT = Ops[0].getOperand(0).getValueType();
54827         if (SrcVT.is256BitVector() && SrcVT.isSimple() &&
54828             SrcVT == Ops[1].getOperand(0).getValueType() &&
54829             Subtarget.useAVX512Regs() &&
54830             Subtarget.getPreferVectorWidth() >= 512 &&
54831             (SrcVT.getScalarSizeInBits() > 16 || Subtarget.useBWIRegs())) {
54832           EVT NewSrcVT = SrcVT.getDoubleNumVectorElementsVT(Ctx);
54833           return DAG.getNode(ISD::TRUNCATE, DL, VT,
54834                              ConcatSubOperand(NewSrcVT, Ops, 0));
54835         }
54836       }
54837       break;
54838     case X86ISD::VSHLI:
54839     case X86ISD::VSRLI:
54840       // Special case: AVX1 SHL/SRL of v4i64 by 32 bits can lower as a shuffle.
54841       // TODO: Move this to LowerShiftByScalarImmediate?
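      // Illustrative mapping (little-endian lanes): shifting each i64 lane
      // left by 32 moves its low i32 into the high i32 and zeroes the low
      // half, i.e. the v8i32 shuffle {Z,0,Z,2,Z,4,Z,6} with Z taken from a
      // zero vector; the logical right shift uses the mirrored mask below.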
54842       if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
54843           llvm::all_of(Ops, [](SDValue Op) {
54844             return Op.getConstantOperandAPInt(1) == 32;
54845           })) {
54846         SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
54847         SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
54848         if (Op0.getOpcode() == X86ISD::VSHLI) {
54849           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54850                                      {8, 0, 8, 2, 8, 4, 8, 6});
54851         } else {
54852           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54853                                      {1, 8, 3, 8, 5, 8, 7, 8});
54854         }
54855         return DAG.getBitcast(VT, Res);
54856       }
54857       [[fallthrough]];
54858     case X86ISD::VSRAI:
54859     case X86ISD::VSHL:
54860     case X86ISD::VSRL:
54861     case X86ISD::VSRA:
54862       if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
54863            (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54864             (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
54865           llvm::all_of(Ops, [Op0](SDValue Op) {
54866             return Op0.getOperand(1) == Op.getOperand(1);
54867           })) {
54868         return DAG.getNode(Op0.getOpcode(), DL, VT,
54869                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54870       }
54871       break;
54872     case X86ISD::VPERMI:
54873     case X86ISD::VROTLI:
54874     case X86ISD::VROTRI:
54875       if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54876           llvm::all_of(Ops, [Op0](SDValue Op) {
54877             return Op0.getOperand(1) == Op.getOperand(1);
54878           })) {
54879         return DAG.getNode(Op0.getOpcode(), DL, VT,
54880                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54881       }
54882       break;
54883     case ISD::AND:
54884     case ISD::OR:
54885     case ISD::XOR:
54886     case X86ISD::ANDNP:
54887       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54888                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
54889         return DAG.getNode(Op0.getOpcode(), DL, VT,
54890                            ConcatSubOperand(VT, Ops, 0),
54891                            ConcatSubOperand(VT, Ops, 1));
54892       }
54893       break;
54894     case X86ISD::PCMPEQ:
54895     case X86ISD::PCMPGT:
54896       if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256() &&
54897           (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1))) {
54898         return DAG.getNode(Op0.getOpcode(), DL, VT,
54899                            ConcatSubOperand(VT, Ops, 0),
54900                            ConcatSubOperand(VT, Ops, 1));
54901       }
54902       break;
54903     case ISD::CTPOP:
54904     case ISD::CTTZ:
54905     case ISD::CTLZ:
54906     case ISD::CTTZ_ZERO_UNDEF:
54907     case ISD::CTLZ_ZERO_UNDEF:
54908       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54909                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
54910         return DAG.getNode(Op0.getOpcode(), DL, VT,
54911                            ConcatSubOperand(VT, Ops, 0));
54912       }
54913       break;
54914     case X86ISD::GF2P8AFFINEQB:
54915       if (!IsSplat &&
54916           (VT.is256BitVector() ||
54917            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54918           llvm::all_of(Ops, [Op0](SDValue Op) {
54919             return Op0.getOperand(2) == Op.getOperand(2);
54920           })) {
54921         return DAG.getNode(Op0.getOpcode(), DL, VT,
54922                            ConcatSubOperand(VT, Ops, 0),
54923                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
54924       }
54925       break;
54926     case ISD::ADD:
54927     case ISD::SUB:
54928     case ISD::MUL:
54929       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54930                        (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54931                         (EltSizeInBits >= 32 || Subtarget.useBWIRegs())))) {
54932         return DAG.getNode(Op0.getOpcode(), DL, VT,
54933                            ConcatSubOperand(VT, Ops, 0),
54934                            ConcatSubOperand(VT, Ops, 1));
54935       }
54936       break;
54937     // VADD, VSUB and VMUL can execute on more ports than VINSERT and their
54938     // latencies are short, so we don't replace them here.
54939     case ISD::FDIV:
54940       if (!IsSplat && (VT.is256BitVector() ||
54941                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
54942         return DAG.getNode(Op0.getOpcode(), DL, VT,
54943                            ConcatSubOperand(VT, Ops, 0),
54944                            ConcatSubOperand(VT, Ops, 1));
54945       }
54946       break;
54947     case X86ISD::HADD:
54948     case X86ISD::HSUB:
54949     case X86ISD::FHADD:
54950     case X86ISD::FHSUB:
54951       if (!IsSplat && VT.is256BitVector() &&
54952           (VT.isFloatingPoint() || Subtarget.hasInt256())) {
54953         return DAG.getNode(Op0.getOpcode(), DL, VT,
54954                            ConcatSubOperand(VT, Ops, 0),
54955                            ConcatSubOperand(VT, Ops, 1));
54956       }
54957       break;
54958     case X86ISD::PACKSS:
54959     case X86ISD::PACKUS:
54960       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54961                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
54962         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54963         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54964                                  NumOps * SrcVT.getVectorNumElements());
54965         return DAG.getNode(Op0.getOpcode(), DL, VT,
54966                            ConcatSubOperand(SrcVT, Ops, 0),
54967                            ConcatSubOperand(SrcVT, Ops, 1));
54968       }
54969       break;
54970     case X86ISD::PALIGNR:
54971       if (!IsSplat &&
54972           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54973            (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
54974           llvm::all_of(Ops, [Op0](SDValue Op) {
54975             return Op0.getOperand(2) == Op.getOperand(2);
54976           })) {
54977         return DAG.getNode(Op0.getOpcode(), DL, VT,
54978                            ConcatSubOperand(VT, Ops, 0),
54979                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
54980       }
54981       break;
54982     case ISD::VSELECT:
54983       if (!IsSplat && Subtarget.hasAVX512() &&
54984           (VT.is256BitVector() ||
54985            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54986           (EltSizeInBits >= 32 || Subtarget.hasBWI())) {
54987         EVT SelVT = Ops[0].getOperand(0).getValueType();
54988         if (SelVT.getVectorElementType() == MVT::i1) {
54989           SelVT = EVT::getVectorVT(Ctx, MVT::i1,
54990                                    NumOps * SelVT.getVectorNumElements());
54991           if (TLI.isTypeLegal(SelVT))
54992             return DAG.getNode(Op0.getOpcode(), DL, VT,
54993                                ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
54994                                ConcatSubOperand(VT, Ops, 1),
54995                                ConcatSubOperand(VT, Ops, 2));
54996         }
54997       }
54998       [[fallthrough]];
54999     case X86ISD::BLENDV:
55000       if (!IsSplat && VT.is256BitVector() && NumOps == 2 &&
55001           (EltSizeInBits >= 32 || Subtarget.hasInt256()) &&
55002           IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
55003         EVT SelVT = Ops[0].getOperand(0).getValueType();
55004         SelVT = SelVT.getDoubleNumVectorElementsVT(Ctx);
55005         if (TLI.isTypeLegal(SelVT))
55006           return DAG.getNode(Op0.getOpcode(), DL, VT,
55007                              ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55008                              ConcatSubOperand(VT, Ops, 1),
55009                              ConcatSubOperand(VT, Ops, 2));
55010       }
55011       break;
55012     }
55013   }
55014 
55015   // Fold subvector loads into one.
55016   // If needed, look through bitcasts to get to the load.
55017   if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
55018     unsigned Fast;
55019     const X86TargetLowering *TLI = Subtarget.getTargetLowering();
55020     if (TLI->allowsMemoryAccess(Ctx, DAG.getDataLayout(), VT,
55021                                 *FirstLd->getMemOperand(), &Fast) &&
55022         Fast) {
55023       if (SDValue Ld =
55024               EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
55025         return Ld;
55026     }
55027   }
55028 
55029   // Attempt to fold target constant loads.
55030   if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
55031     SmallVector<APInt> EltBits;
55032     APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
55033     for (unsigned I = 0; I != NumOps; ++I) {
55034       APInt OpUndefElts;
55035       SmallVector<APInt> OpEltBits;
55036       if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
55037                                          OpEltBits, true, false))
55038         break;
55039       EltBits.append(OpEltBits);
55040       UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
55041     }
55042     if (EltBits.size() == VT.getVectorNumElements()) {
55043       Constant *C = getConstantVector(VT, EltBits, UndefElts, Ctx);
55044       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
55045       SDValue CV = DAG.getConstantPool(C, PVT);
55046       MachineFunction &MF = DAG.getMachineFunction();
55047       MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
55048       SDValue Ld = DAG.getLoad(VT, DL, DAG.getEntryNode(), CV, MPI);
55049       SDValue Sub = extractSubVector(Ld, 0, DAG, DL, Op0.getValueSizeInBits());
55050       DAG.ReplaceAllUsesOfValueWith(Op0, Sub);
55051       return Ld;
55052     }
55053   }
55054 
55055   // If this simple subvector or scalar/subvector broadcast_load is inserted
55056   // into both halves, use a larger broadcast_load. Update other uses to use
55057   // an extracted subvector.
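  // Illustrative example: concat(broadcast_load p, broadcast_load p) at 256
  // bits becomes one 256-bit broadcast_load of p, and the original narrow node
  // is replaced with an extract_subvector of the wider result so its other
  // users are unaffected.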
55058   if (IsSplat &&
55059       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
55060     if (ISD::isNormalLoad(Op0.getNode()) ||
55061         Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55062         Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
55063       auto *Mem = cast<MemSDNode>(Op0);
55064       unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
55065                          ? X86ISD::VBROADCAST_LOAD
55066                          : X86ISD::SUBV_BROADCAST_LOAD;
55067       if (SDValue BcastLd =
55068               getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
55069         SDValue BcastSrc =
55070             extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
55071         DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
55072         return BcastLd;
55073       }
55074     }
55075   }
55076 
55077   // If we're splatting a 128-bit subvector to 512-bits, use SHUF128 directly.
55078   if (IsSplat && NumOps == 4 && VT.is512BitVector() &&
55079       Subtarget.useAVX512Regs()) {
55080     MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
55081     SDValue Res = widenSubVector(Op0, false, Subtarget, DAG, DL, 512);
55082     Res = DAG.getBitcast(ShuffleVT, Res);
55083     Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT, Res, Res,
55084                       getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
55085     return DAG.getBitcast(VT, Res);
55086   }
55087 
55088   return SDValue();
55089 }
55090 
55091 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
55092                                      TargetLowering::DAGCombinerInfo &DCI,
55093                                      const X86Subtarget &Subtarget) {
55094   EVT VT = N->getValueType(0);
55095   EVT SrcVT = N->getOperand(0).getValueType();
55096   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55097   SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
55098 
55099   if (VT.getVectorElementType() == MVT::i1) {
55100     // Attempt to constant fold.
55101     unsigned SubSizeInBits = SrcVT.getSizeInBits();
55102     APInt Constant = APInt::getZero(VT.getSizeInBits());
55103     for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
55104       auto *C = dyn_cast<ConstantSDNode>(peekThroughBitcasts(Ops[I]));
55105       if (!C) break;
55106       Constant.insertBits(C->getAPIntValue(), I * SubSizeInBits);
55107       if (I == (E - 1)) {
55108         EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
55109         if (TLI.isTypeLegal(IntVT))
55110           return DAG.getBitcast(VT, DAG.getConstant(Constant, SDLoc(N), IntVT));
55111       }
55112     }
55113 
55114     // Don't do anything else for i1 vectors.
55115     return SDValue();
55116   }
55117 
55118   if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
55119     if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
55120                                            DCI, Subtarget))
55121       return R;
55122   }
55123 
55124   return SDValue();
55125 }
55126 
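      // Combine an INSERT_SUBVECTOR node: fold inserts of undef/zero subvectors,
      // turn insert-of-extract patterns into shuffles, match concat_vectors style
      // patterns via collectConcatOps, and widen broadcasts and broadcast loads
      // that are inserted into an upper undef.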
55127 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55128                                        TargetLowering::DAGCombinerInfo &DCI,
55129                                        const X86Subtarget &Subtarget) {
55130   if (DCI.isBeforeLegalizeOps())
55131     return SDValue();
55132 
55133   MVT OpVT = N->getSimpleValueType(0);
55134 
55135   bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
55136 
55137   SDLoc dl(N);
55138   SDValue Vec = N->getOperand(0);
55139   SDValue SubVec = N->getOperand(1);
55140 
55141   uint64_t IdxVal = N->getConstantOperandVal(2);
55142   MVT SubVecVT = SubVec.getSimpleValueType();
55143 
55144   if (Vec.isUndef() && SubVec.isUndef())
55145     return DAG.getUNDEF(OpVT);
55146 
55147   // Inserting undefs/zeros into zeros/undefs is a zero vector.
55148   // Inserting undefs/zeros into zeros/undefs produces a zero vector.
55149       (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
55150     return getZeroVector(OpVT, Subtarget, DAG, dl);
55151 
55152   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
55153     // If we're inserting into a zero vector and then into a larger zero vector,
55154     // just insert into the larger zero vector directly.
55155     if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55156         ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
55157       uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
55158       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55159                          getZeroVector(OpVT, Subtarget, DAG, dl),
55160                          SubVec.getOperand(1),
55161                          DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
55162     }
55163 
55164     // If we're inserting into a zero vector and our input was extracted from
55165     // an insert into a zero vector of the same type, and the extraction was at
55166     // least as large as the original insertion, just insert the original
55167     // subvector into a zero vector.
55168     if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
55169         isNullConstant(SubVec.getOperand(1)) &&
55170         SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
55171       SDValue Ins = SubVec.getOperand(0);
55172       if (isNullConstant(Ins.getOperand(2)) &&
55173           ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
55174           Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
55175               SubVecVT.getFixedSizeInBits())
55176           return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55177                              getZeroVector(OpVT, Subtarget, DAG, dl),
55178                              Ins.getOperand(1), N->getOperand(2));
55179     }
55180   }
55181 
55182   // Stop here if this is an i1 vector.
55183   if (IsI1Vector)
55184     return SDValue();
55185 
55186   // Eliminate an intermediate vector widening:
55187   // insert_subvector X, (insert_subvector undef, Y, 0), Idx -->
55188   // insert_subvector X, Y, Idx
55189   // TODO: This is a more general version of a DAGCombiner fold, can we move it
55190   // there?
55191   if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55192       SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
55193     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
55194                        SubVec.getOperand(1), N->getOperand(2));
55195 
55196   // If this is an insert of an extract, combine to a shuffle. Don't do this
55197   // if the insert or extract can be represented with a subregister operation.
55198   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55199       SubVec.getOperand(0).getSimpleValueType() == OpVT &&
55200       (IdxVal != 0 ||
55201        !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
55202     int ExtIdxVal = SubVec.getConstantOperandVal(1);
55203     if (ExtIdxVal != 0) {
55204       int VecNumElts = OpVT.getVectorNumElements();
55205       int SubVecNumElts = SubVecVT.getVectorNumElements();
55206       SmallVector<int, 64> Mask(VecNumElts);
55207       // First create an identity shuffle mask.
55208       for (int i = 0; i != VecNumElts; ++i)
55209         Mask[i] = i;
55210       // Now insert the extracted portion.
55211       for (int i = 0; i != SubVecNumElts; ++i)
55212         Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
55213 
55214       return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
55215     }
55216   }
55217 
55218   // Match concat_vector style patterns.
55219   SmallVector<SDValue, 2> SubVectorOps;
55220   if (collectConcatOps(N, SubVectorOps, DAG)) {
55221     if (SDValue Fold =
55222             combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
55223       return Fold;
55224 
55225     // If we're inserting all zeros into the upper half, change this to
55226     // a concat with zero. We will match this to a move
55227     // with implicit upper bit zeroing during isel.
55228     // We do this here because we don't want combineConcatVectorOps to
55229     // create INSERT_SUBVECTOR from CONCAT_VECTORS.
55230     if (SubVectorOps.size() == 2 &&
55231         ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
55232       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55233                          getZeroVector(OpVT, Subtarget, DAG, dl),
55234                          SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
55235 
55236     // Attempt to recursively combine to a shuffle.
55237     if (all_of(SubVectorOps, [](SDValue SubOp) {
55238           return isTargetShuffle(SubOp.getOpcode());
55239         })) {
55240       SDValue Op(N, 0);
55241       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55242         return Res;
55243     }
55244   }
55245 
55246   // If this is a broadcast insert into an upper undef, use a larger broadcast.
55247   if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
55248     return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
55249 
55250   // If this is a broadcast load inserted into an upper undef, use a larger
55251   // broadcast load.
55252   if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
55253       SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
55254     auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
55255     SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
55256     SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
55257     SDValue BcastLd =
55258         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
55259                                 MemIntr->getMemoryVT(),
55260                                 MemIntr->getMemOperand());
55261     DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
55262     return BcastLd;
55263   }
55264 
55265   // If we're splatting the lower half subvector of a full vector load into the
55266   // upper half, attempt to create a subvector broadcast.
55267   if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
55268       Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
55269     auto *VecLd = dyn_cast<LoadSDNode>(Vec);
55270     auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
55271     if (VecLd && SubLd &&
55272         DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
55273                                            SubVec.getValueSizeInBits() / 8, 0))
55274       return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
55275                                SubLd, 0, DAG);
55276   }
55277 
55278   return SDValue();
55279 }
55280 
55281 /// If we are extracting a subvector of a vector select and the select condition
55282 /// is composed of concatenated vectors, try to narrow the select width. This
55283 /// is a common pattern for AVX1 integer code because 256-bit selects may be
55284 /// legal, but there is almost no integer math/logic available for 256-bit.
55285 /// This function should only be called with legal types (otherwise, the calls
55286 /// to get simple value types will assert).
55287 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
55288   SDValue Sel = Ext->getOperand(0);
55289   if (Sel.getOpcode() != ISD::VSELECT ||
55290       !isFreeToSplitVector(Sel.getOperand(0).getNode(), DAG))
55291     return SDValue();
55292 
55293   // Note: We assume simple value types because this should only be called with
55294   //       legal operations/types.
55295   // TODO: This can be extended to handle extraction to 256 bits.
55296   MVT VT = Ext->getSimpleValueType(0);
55297   if (!VT.is128BitVector())
55298     return SDValue();
55299 
55300   MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
55301   if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
55302     return SDValue();
55303 
55304   MVT WideVT = Ext->getOperand(0).getSimpleValueType();
55305   MVT SelVT = Sel.getSimpleValueType();
55306   assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
55307          "Unexpected vector type with legal operations");
55308 
55309   unsigned SelElts = SelVT.getVectorNumElements();
55310   unsigned CastedElts = WideVT.getVectorNumElements();
55311   unsigned ExtIdx = Ext->getConstantOperandVal(1);
55312   if (SelElts % CastedElts == 0) {
55313     // The select has the same or more (narrower) elements than the extract
55314     // operand. The extraction index gets scaled by that factor.
55315     ExtIdx *= (SelElts / CastedElts);
55316   } else if (CastedElts % SelElts == 0) {
55317     // The select has fewer (wider) elements than the extract operand. Make sure
55318     // that the extraction index can be divided evenly.
55319     unsigned IndexDivisor = CastedElts / SelElts;
55320     if (ExtIdx % IndexDivisor != 0)
55321       return SDValue();
55322     ExtIdx /= IndexDivisor;
55323   } else {
55324     llvm_unreachable("Element counts of simple vector types are not divisible?");
55325   }
55326 
55327   unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
55328   unsigned NarrowElts = SelElts / NarrowingFactor;
55329   MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
55330   SDLoc DL(Ext);
55331   SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
55332   SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
55333   SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
55334   SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
55335   return DAG.getBitcast(VT, NarrowSel);
55336 }
55337 
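      // Combine an EXTRACT_SUBVECTOR node: split AVX1 256-bit and+not patterns,
      // narrow extracted vector selects, fold extracts of constants, broadcasts,
      // insertions and shuffles, and shrink one-use conversions, extensions,
      // truncates and vXi64 shifts to the extracted width.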
55338 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55339                                         TargetLowering::DAGCombinerInfo &DCI,
55340                                         const X86Subtarget &Subtarget) {
55341   // For AVX1 only, if we are extracting from a 256-bit and+not (which will
55342   // eventually get combined/lowered into ANDNP) with a concatenated operand,
55343   // split the 'and' into 128-bit ops to avoid the concatenate and extract.
55344   // We let generic combining take over from there to simplify the
55345   // insert/extract and 'not'.
55346   // This pattern emerges during AVX1 legalization. We handle it before lowering
55347   // to avoid complications like splitting constant vector loads.
55348 
55349   // Capture the original wide type in the likely case that we need to bitcast
55350   // back to this type.
55351   if (!N->getValueType(0).isSimple())
55352     return SDValue();
55353 
55354   MVT VT = N->getSimpleValueType(0);
55355   SDValue InVec = N->getOperand(0);
55356   unsigned IdxVal = N->getConstantOperandVal(1);
55357   SDValue InVecBC = peekThroughBitcasts(InVec);
55358   EVT InVecVT = InVec.getValueType();
55359   unsigned SizeInBits = VT.getSizeInBits();
55360   unsigned InSizeInBits = InVecVT.getSizeInBits();
55361   unsigned NumSubElts = VT.getVectorNumElements();
55362   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55363 
55364   if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
55365       TLI.isTypeLegal(InVecVT) &&
55366       InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
55367     auto isConcatenatedNot = [](SDValue V) {
55368       V = peekThroughBitcasts(V);
55369       if (!isBitwiseNot(V))
55370         return false;
55371       SDValue NotOp = V->getOperand(0);
55372       return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
55373     };
55374     if (isConcatenatedNot(InVecBC.getOperand(0)) ||
55375         isConcatenatedNot(InVecBC.getOperand(1))) {
55376       // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
55377       SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
55378       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
55379                          DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
55380     }
55381   }
55382 
55383   if (DCI.isBeforeLegalizeOps())
55384     return SDValue();
55385 
55386   if (SDValue V = narrowExtractedVectorSelect(N, DAG))
55387     return V;
55388 
55389   if (ISD::isBuildVectorAllZeros(InVec.getNode()))
55390     return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55391 
55392   if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
55393     if (VT.getScalarType() == MVT::i1)
55394       return DAG.getConstant(1, SDLoc(N), VT);
55395     return getOnesVector(VT, DAG, SDLoc(N));
55396   }
55397 
55398   if (InVec.getOpcode() == ISD::BUILD_VECTOR)
55399     return DAG.getBuildVector(VT, SDLoc(N),
55400                               InVec->ops().slice(IdxVal, NumSubElts));
55401 
55402   // If we are extracting from an insert into a larger vector, replace it with
55403   // a smaller insert, as long as the extraction covers at least the originally
55404   // inserted subvector. Don't do this for i1 vectors.
55405   // TODO: Relax the matching indices requirement?
55406   if (VT.getVectorElementType() != MVT::i1 &&
55407       InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
55408       IdxVal == InVec.getConstantOperandVal(2) &&
55409       InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
55410     SDLoc DL(N);
55411     SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
55412                                  InVec.getOperand(0), N->getOperand(1));
55413     unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
55414     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
55415                        InVec.getOperand(1),
55416                        DAG.getVectorIdxConstant(NewIdxVal, DL));
55417   }
55418 
55419   // If we're extracting an upper subvector from a broadcast, we should just
55420   // extract the lowest subvector instead, which should allow
55421   // SimplifyDemandedVectorElts to do more simplifications.
55422   if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
55423                       InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55424                       DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
55425     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55426 
55427   // If we're extracting a broadcasted subvector, just use the lowest subvector.
55428   if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
55429       cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
55430     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55431 
55432   // Attempt to extract from the source of a shuffle vector.
55433   if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
55434     SmallVector<int, 32> ShuffleMask;
55435     SmallVector<int, 32> ScaledMask;
55436     SmallVector<SDValue, 2> ShuffleInputs;
55437     unsigned NumSubVecs = InSizeInBits / SizeInBits;
55438     // Decode the shuffle mask and scale it so it's shuffling subvectors.
55439     if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
55440         scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
55441       unsigned SubVecIdx = IdxVal / NumSubElts;
55442       if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
55443         return DAG.getUNDEF(VT);
55444       if (ScaledMask[SubVecIdx] == SM_SentinelZero)
55445         return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55446       SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
55447       if (Src.getValueSizeInBits() == InSizeInBits) {
55448         unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
55449         unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
55450         return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
55451                                 SDLoc(N), SizeInBits);
55452       }
55453     }
55454   }
55455 
55456   // If we're extracting the lowest subvector and we're the only user,
55457   // we may be able to perform this with a smaller vector width.
55458   unsigned InOpcode = InVec.getOpcode();
55459   if (InVec.hasOneUse()) {
55460     if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
55461       // v2f64 CVTDQ2PD(v4i32).
55462       if (InOpcode == ISD::SINT_TO_FP &&
55463           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55464         return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
55465       }
55466       // v2f64 CVTUDQ2PD(v4i32).
55467       if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
55468           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55469         return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
55470       }
55471       // v2f64 CVTPS2PD(v4f32).
55472       if (InOpcode == ISD::FP_EXTEND &&
55473           InVec.getOperand(0).getValueType() == MVT::v4f32) {
55474         return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
55475       }
55476     }
55477     if (IdxVal == 0 &&
55478         (ISD::isExtOpcode(InOpcode) || ISD::isExtVecInRegOpcode(InOpcode)) &&
55479         (SizeInBits == 128 || SizeInBits == 256) &&
55480         InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
55481       SDLoc DL(N);
55482       SDValue Ext = InVec.getOperand(0);
55483       if (Ext.getValueSizeInBits() > SizeInBits)
55484         Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
55485       unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
55486       return DAG.getNode(ExtOp, DL, VT, Ext);
55487     }
55488     if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
55489         InVec.getOperand(0).getValueType().is256BitVector() &&
55490         InVec.getOperand(1).getValueType().is256BitVector() &&
55491         InVec.getOperand(2).getValueType().is256BitVector()) {
55492       SDLoc DL(N);
55493       SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
55494       SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
55495       SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
55496       return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
55497     }
55498     if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
55499         (VT.is128BitVector() || VT.is256BitVector())) {
55500       SDLoc DL(N);
55501       SDValue InVecSrc = InVec.getOperand(0);
55502       unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
55503       SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
55504       return DAG.getNode(InOpcode, DL, VT, Ext);
55505     }
55506     if (InOpcode == X86ISD::MOVDDUP &&
55507         (VT.is128BitVector() || VT.is256BitVector())) {
55508       SDLoc DL(N);
55509       SDValue Ext0 =
55510           extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55511       return DAG.getNode(InOpcode, DL, VT, Ext0);
55512     }
55513   }
55514 
55515   // Always split vXi64 logical shifts where we're extracting the upper 32 bits,
55516   // as this is very likely to fold into a shuffle/truncation.
55517   if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
55518       InVecVT.getScalarSizeInBits() == 64 &&
55519       InVec.getConstantOperandAPInt(1) == 32) {
55520     SDLoc DL(N);
55521     SDValue Ext =
55522         extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55523     return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
55524   }
55525 
55526   return SDValue();
55527 }
55528 
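      // Combine a SCALAR_TO_VECTOR node. For v1i1, look through AND-with-1 and i1
      // extract_vector_elt patterns; narrow v2i64/v2f64 cases to v4i32 when the
      // scalar is effectively a 32-bit value; form MOVQ2DQ from MMX bitcasts; and
      // reuse an existing VBROADCAST of the same scalar.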
55529 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
55530   EVT VT = N->getValueType(0);
55531   SDValue Src = N->getOperand(0);
55532   SDLoc DL(N);
55533 
55534   // If this is a scalar_to_vector to v1i1 from an AND with 1, bypass the and.
55535   // This occurs frequently in our masked scalar intrinsic code and our
55536   // floating point select lowering with AVX512.
55537   // TODO: SimplifyDemandedBits instead?
55538   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse() &&
55539       isOneConstant(Src.getOperand(1)))
55540     return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Src.getOperand(0));
55541 
55542   // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
55543   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
55544       Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
55545       Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
55546       isNullConstant(Src.getOperand(1)))
55547     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
55548                        Src.getOperand(1));
55549 
55550   // Reduce v2i64 to v4i32 if we don't need the upper bits or they are known zero.
55551   // TODO: Move to DAGCombine/SimplifyDemandedBits?
55552   if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
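          // Helper: if Op is an i64 value that is effectively a zero/any extension
          // of a value of at most 32 bits (an extend node, an extending load, or
          // upper bits known to be zero), return the value whose low 32 bits
          // should be used; otherwise return an empty SDValue.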
55553     auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
55554       if (Op.getValueType() != MVT::i64)
55555         return SDValue();
55556       unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
55557       if (Op.getOpcode() == Opc &&
55558           Op.getOperand(0).getScalarValueSizeInBits() <= 32)
55559         return Op.getOperand(0);
55560       unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
55561       if (auto *Ld = dyn_cast<LoadSDNode>(Op))
55562         if (Ld->getExtensionType() == Ext &&
55563             Ld->getMemoryVT().getScalarSizeInBits() <= 32)
55564           return Op;
55565       if (IsZeroExt) {
55566         KnownBits Known = DAG.computeKnownBits(Op);
55567         if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
55568           return Op;
55569       }
55570       return SDValue();
55571     };
55572 
55573     if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
55574       return DAG.getBitcast(
55575           VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55576                           DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));
55577 
55578     if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
55579       return DAG.getBitcast(
55580           VT,
55581           DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
55582                       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55583                                   DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
55584   }
55585 
55586   // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
55587   if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
55588       Src.getOperand(0).getValueType() == MVT::x86mmx)
55589     return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
55590 
55591   // See if we're broadcasting the scalar value, in which case just reuse that.
55592   // Ensure the broadcast operand is this exact SDValue, not just the same node.
55593   if (VT.getScalarType() == Src.getValueType())
55594     for (SDNode *User : Src->uses())
55595       if (User->getOpcode() == X86ISD::VBROADCAST &&
55596           Src == User->getOperand(0)) {
55597         unsigned SizeInBits = VT.getFixedSizeInBits();
55598         unsigned BroadcastSizeInBits =
55599             User->getValueSizeInBits(0).getFixedValue();
55600         if (BroadcastSizeInBits == SizeInBits)
55601           return SDValue(User, 0);
55602         if (BroadcastSizeInBits > SizeInBits)
55603           return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
55604         // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
55605         // coverage.
55606       }
55607 
55608   return SDValue();
55609 }
55610 
55611 // Simplify PMULDQ and PMULUDQ operations.
55612 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
55613                              TargetLowering::DAGCombinerInfo &DCI,
55614                              const X86Subtarget &Subtarget) {
55615   SDValue LHS = N->getOperand(0);
55616   SDValue RHS = N->getOperand(1);
55617 
55618   // Canonicalize constant to RHS.
55619   if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
55620       !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
55621     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
55622 
55623   // Multiply by zero.
55624   // Don't return RHS as it may contain UNDEFs.
55625   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
55626     return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
55627 
55628   // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
55629   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55630   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
55631     return SDValue(N, 0);
55632 
55633   // If the input is an extend_invec and the SimplifyDemandedBits call didn't
55634   // convert it to any_extend_invec, due to the LegalOperations check, do the
55635   // conversion to a vector shuffle manually. This exposes combine
55636   // opportunities missed by combineEXTEND_VECTOR_INREG not calling
55637   // combineX86ShufflesRecursively on SSE4.1 targets.
55638   // FIXME: This is basically a hack around several other issues related to
55639   // ANY_EXTEND_VECTOR_INREG.
55640   if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
55641       (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55642        LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55643       LHS.getOperand(0).getValueType() == MVT::v4i32) {
55644     SDLoc dl(N);
55645     LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
55646                                LHS.getOperand(0), { 0, -1, 1, -1 });
55647     LHS = DAG.getBitcast(MVT::v2i64, LHS);
55648     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55649   }
55650   if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
55651       (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55652        RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55653       RHS.getOperand(0).getValueType() == MVT::v4i32) {
55654     SDLoc dl(N);
55655     RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
55656                                RHS.getOperand(0), { 0, -1, 1, -1 });
55657     RHS = DAG.getBitcast(MVT::v2i64, RHS);
55658     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55659   }
55660 
55661   return SDValue();
55662 }
55663 
55664 // Simplify VPMADDUBSW/VPMADDWD operations.
55665 static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
55666                              TargetLowering::DAGCombinerInfo &DCI) {
55667   EVT VT = N->getValueType(0);
55668   SDValue LHS = N->getOperand(0);
55669   SDValue RHS = N->getOperand(1);
55670 
55671   // Multiply by zero.
55672   // Don't return LHS/RHS as it may contain UNDEFs.
55673   if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
55674       ISD::isBuildVectorAllZeros(RHS.getNode()))
55675     return DAG.getConstant(0, SDLoc(N), VT);
55676 
55677   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55678   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55679   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55680     return SDValue(N, 0);
55681 
55682   return SDValue();
55683 }
55684 
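      // Combine *_EXTEND_VECTOR_INREG nodes: merge with a one-use load to form an
      // extending load, fold nested extends and extends of extracted extends, turn
      // zero_extend_vector_inreg of a build_vector into a wider build_vector, and
      // attempt shuffle combining on SSE4.1+ targets.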
55685 static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
55686                                           TargetLowering::DAGCombinerInfo &DCI,
55687                                           const X86Subtarget &Subtarget) {
55688   EVT VT = N->getValueType(0);
55689   SDValue In = N->getOperand(0);
55690   unsigned Opcode = N->getOpcode();
55691   unsigned InOpcode = In.getOpcode();
55692   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55693   SDLoc DL(N);
55694 
55695   // Try to merge vector loads and extend_inreg to an extload.
55696   if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
55697       In.hasOneUse()) {
55698     auto *Ld = cast<LoadSDNode>(In);
55699     if (Ld->isSimple()) {
55700       MVT SVT = In.getSimpleValueType().getVectorElementType();
55701       ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
55702                                  ? ISD::SEXTLOAD
55703                                  : ISD::ZEXTLOAD;
55704       EVT MemVT = VT.changeVectorElementType(SVT);
55705       if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
55706         SDValue Load = DAG.getExtLoad(
55707             Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
55708             MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
55709         DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
55710         return Load;
55711       }
55712     }
55713   }
55714 
55715   // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
55716   if (Opcode == InOpcode)
55717     return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
55718 
55719   // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
55720   // -> EXTEND_VECTOR_INREG(X).
55721   // TODO: Handle non-zero subvector indices.
55722   if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
55723       In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
55724       In.getOperand(0).getOperand(0).getValueSizeInBits() ==
55725           In.getValueSizeInBits())
55726     return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
55727 
55728   // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
55729   // TODO: Move to DAGCombine?
55730   if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
55731       In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
55732       In.getValueSizeInBits() == VT.getSizeInBits()) {
55733     unsigned NumElts = VT.getVectorNumElements();
55734     unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
55735     EVT EltVT = In.getOperand(0).getValueType();
55736     SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
55737     for (unsigned I = 0; I != NumElts; ++I)
55738       Elts[I * Scale] = In.getOperand(I);
55739     return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
55740   }
55741 
55742   // Attempt to combine as a shuffle on SSE41+ targets.
55743   if (Subtarget.hasSSE41()) {
55744     SDValue Op(N, 0);
55745     if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
55746       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55747         return Res;
55748   }
55749 
55750   return SDValue();
55751 }
55752 
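      // Combine KSHIFTL/KSHIFTR mask shifts: a shift of an all-zeros mask is zero;
      // otherwise try to simplify via SimplifyDemandedVectorElts.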
55753 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
55754                              TargetLowering::DAGCombinerInfo &DCI) {
55755   EVT VT = N->getValueType(0);
55756 
55757   if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
55758     return DAG.getConstant(0, SDLoc(N), VT);
55759 
55760   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55761   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55762   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55763     return SDValue(N, 0);
55764 
55765   return SDValue();
55766 }
55767 
55768 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
55769 // Done as a combine because the lowerings for fp16_to_fp and fp_to_fp16 produce
55770 // extra instructions between the conversions due to going to scalar and back.
55771 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
55772                                  const X86Subtarget &Subtarget) {
55773   if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
55774     return SDValue();
55775 
55776   if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
55777     return SDValue();
55778 
55779   if (N->getValueType(0) != MVT::f32 ||
55780       N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
55781     return SDValue();
55782 
55783   SDLoc dl(N);
55784   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
55785                             N->getOperand(0).getOperand(0));
55786   Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
55787                     DAG.getTargetConstant(4, dl, MVT::i32));
55788   Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
55789   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
55790                      DAG.getIntPtrConstant(0, dl));
55791 }
55792 
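      // Combine vXf16 -> vXf32/vXf64 FP_EXTEND on F16C targets without AVX512-FP16
      // by widening to at least 8 f16 elements and converting via (STRICT_)CVTPH2PS.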
55793 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
55794                                 const X86Subtarget &Subtarget) {
55795   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
55796     return SDValue();
55797 
55798   if (Subtarget.hasFP16())
55799     return SDValue();
55800 
55801   bool IsStrict = N->isStrictFPOpcode();
55802   EVT VT = N->getValueType(0);
55803   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
55804   EVT SrcVT = Src.getValueType();
55805 
55806   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
55807     return SDValue();
55808 
55809   if (VT.getVectorElementType() != MVT::f32 &&
55810       VT.getVectorElementType() != MVT::f64)
55811     return SDValue();
55812 
55813   unsigned NumElts = VT.getVectorNumElements();
55814   if (NumElts == 1 || !isPowerOf2_32(NumElts))
55815     return SDValue();
55816 
55817   SDLoc dl(N);
55818 
55819   // Convert the input to vXi16.
55820   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
55821   Src = DAG.getBitcast(IntVT, Src);
55822 
55823   // Widen to at least 8 input elements.
55824   if (NumElts < 8) {
55825     unsigned NumConcats = 8 / NumElts;
55826     SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
55827                                 : DAG.getConstant(0, dl, IntVT);
55828     SmallVector<SDValue, 4> Ops(NumConcats, Fill);
55829     Ops[0] = Src;
55830     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
55831   }
55832 
55833   // Destination is vXf32 with at least 4 elements.
55834   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
55835                                std::max(4U, NumElts));
55836   SDValue Cvt, Chain;
55837   if (IsStrict) {
55838     Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
55839                       {N->getOperand(0), Src});
55840     Chain = Cvt.getValue(1);
55841   } else {
55842     Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
55843   }
55844 
55845   if (NumElts < 4) {
55846     assert(NumElts == 2 && "Unexpected size");
55847     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
55848                       DAG.getIntPtrConstant(0, dl));
55849   }
55850 
55851   if (IsStrict) {
55852     // Extend to the original VT if necessary.
55853     if (Cvt.getValueType() != VT) {
55854       Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
55855                         {Chain, Cvt});
55856       Chain = Cvt.getValue(1);
55857     }
55858     return DAG.getMergeValues({Cvt, Chain}, dl);
55859   }
55860 
55861   // Extend to the original VT if necessary.
55862   return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
55863 }
55864 
55865 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
55866 // from. Limit this to cases where the loads have the same input chain and the
55867 // output chains are unused. This avoids any memory ordering issues.
55868 static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
55869                                      TargetLowering::DAGCombinerInfo &DCI) {
55870   assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
55871           N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
55872          "Unknown broadcast load type");
55873 
55874   // Only do this if the chain result is unused.
55875   if (N->hasAnyUseOfValue(1))
55876     return SDValue();
55877 
55878   auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
55879 
55880   SDValue Ptr = MemIntrin->getBasePtr();
55881   SDValue Chain = MemIntrin->getChain();
55882   EVT VT = N->getSimpleValueType(0);
55883   EVT MemVT = MemIntrin->getMemoryVT();
55884 
55885   // Look at other users of our base pointer and try to find a wider broadcast.
55886   // The input chain and the size of the memory VT must match.
55887   for (SDNode *User : Ptr->uses())
55888     if (User != N && User->getOpcode() == N->getOpcode() &&
55889         cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
55890         cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
55891         cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
55892             MemVT.getSizeInBits() &&
55893         !User->hasAnyUseOfValue(1) &&
55894         User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
55895       SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
55896                                          VT.getSizeInBits());
55897       Extract = DAG.getBitcast(VT, Extract);
55898       return DCI.CombineTo(N, Extract, SDValue(User, 1));
55899     }
55900 
55901   return SDValue();
55902 }
55903 
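      // Combine vXf32 -> vXf16 FP_ROUND on F16C targets. With AVX512-FP16, fold
      // rounds of concatenated v4i64 int-to-fp conversions into CVTSI2P/CVTUI2P
      // plus a shuffle; otherwise widen and convert via (STRICT_)CVTPS2PH.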
55904 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
55905                                const X86Subtarget &Subtarget) {
55906   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
55907     return SDValue();
55908 
55909   bool IsStrict = N->isStrictFPOpcode();
55910   EVT VT = N->getValueType(0);
55911   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
55912   EVT SrcVT = Src.getValueType();
55913 
55914   if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
55915       SrcVT.getVectorElementType() != MVT::f32)
55916     return SDValue();
55917 
55918   SDLoc dl(N);
55919 
55920   SDValue Cvt, Chain;
55921   unsigned NumElts = VT.getVectorNumElements();
55922   if (Subtarget.hasFP16()) {
55923     // Combine (v8f16 fp_round(concat_vectors(v4f32 (xint_to_fp v4i64), ..)))
55924     // into (v8f16 vector_shuffle(v8f16 (CVTXI2P v4i64), ..))
55925     if (NumElts == 8 && Src.getOpcode() == ISD::CONCAT_VECTORS) {
55926       SDValue Cvt0, Cvt1;
55927       SDValue Op0 = Src.getOperand(0);
55928       SDValue Op1 = Src.getOperand(1);
55929       bool IsOp0Strict = Op0->isStrictFPOpcode();
55930       if (Op0.getOpcode() != Op1.getOpcode() ||
55931           Op0.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64 ||
55932           Op1.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64) {
55933         return SDValue();
55934       }
55935       int Mask[8] = {0, 1, 2, 3, 8, 9, 10, 11};
55936       if (IsStrict) {
55937         assert(IsOp0Strict && "Op0 must be strict node");
55938         unsigned Opc = Op0.getOpcode() == ISD::STRICT_SINT_TO_FP
55939                            ? X86ISD::STRICT_CVTSI2P
55940                            : X86ISD::STRICT_CVTUI2P;
55941         Cvt0 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
55942                            {Op0.getOperand(0), Op0.getOperand(1)});
55943         Cvt1 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
55944                            {Op1.getOperand(0), Op1.getOperand(1)});
55945         Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
55946         return DAG.getMergeValues({Cvt, Cvt0.getValue(1)}, dl);
55947       }
55948       unsigned Opc = Op0.getOpcode() == ISD::SINT_TO_FP ? X86ISD::CVTSI2P
55949                                                         : X86ISD::CVTUI2P;
55950       Cvt0 = DAG.getNode(Opc, dl, MVT::v8f16, Op0.getOperand(0));
55951       Cvt1 = DAG.getNode(Opc, dl, MVT::v8f16, Op1.getOperand(0));
55952       return Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
55953     }
55954     return SDValue();
55955   }
55956 
55957   if (NumElts == 1 || !isPowerOf2_32(NumElts))
55958     return SDValue();
55959 
55960   // Widen to at least 4 input elements.
55961   if (NumElts < 4)
55962     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
55963                       DAG.getConstantFP(0.0, dl, SrcVT));
55964 
55965   // Destination is vXi16 with at least 8 elements.
55966   EVT CvtVT =
55967       EVT::getVectorVT(*DAG.getContext(), MVT::i16, std::max(8U, NumElts));
55968   SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
55969   if (IsStrict) {
55970     Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
55971                       {N->getOperand(0), Src, Rnd});
55972     Chain = Cvt.getValue(1);
55973   } else {
55974     Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
55975   }
55976 
55977   // Extract down to the real number of elements.
55978   if (NumElts < 8) {
55979     EVT IntVT = VT.changeVectorElementTypeToInteger();
55980     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
55981                       DAG.getIntPtrConstant(0, dl));
55982   }
55983 
55984   Cvt = DAG.getBitcast(VT, Cvt);
55985 
55986   if (IsStrict)
55987     return DAG.getMergeValues({Cvt, Chain}, dl);
55988 
55989   return Cvt;
55990 }
55991 
55992 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
55993   SDValue Src = N->getOperand(0);
55994 
55995   // Turn MOVDQ2Q+simple_load into an mmx load.
55996   if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
55997     LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
55998 
55999     if (LN->isSimple()) {
56000       SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
56001                                   LN->getBasePtr(),
56002                                   LN->getPointerInfo(),
56003                                   LN->getOriginalAlign(),
56004                                   LN->getMemOperand()->getFlags());
56005       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
56006       return NewLd;
56007     }
56008   }
56009 
56010   return SDValue();
56011 }
56012 
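      // Attempt to simplify PDEP nodes via SimplifyDemandedBits.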
56013 static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
56014                            TargetLowering::DAGCombinerInfo &DCI) {
56015   unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
56016   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56017   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
56018     return SDValue(N, 0);
56019 
56020   return SDValue();
56021 }
56022 
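      // Main X86 DAG-combine entry point: dispatch each supported opcode to its
      // combine helper.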
56023 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
56024                                              DAGCombinerInfo &DCI) const {
56025   SelectionDAG &DAG = DCI.DAG;
56026   switch (N->getOpcode()) {
56027   default: break;
56028   case ISD::SCALAR_TO_VECTOR:
56029     return combineScalarToVector(N, DAG);
56030   case ISD::EXTRACT_VECTOR_ELT:
56031   case X86ISD::PEXTRW:
56032   case X86ISD::PEXTRB:
56033     return combineExtractVectorElt(N, DAG, DCI, Subtarget);
56034   case ISD::CONCAT_VECTORS:
56035     return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
56036   case ISD::INSERT_SUBVECTOR:
56037     return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
56038   case ISD::EXTRACT_SUBVECTOR:
56039     return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
56040   case ISD::VSELECT:
56041   case ISD::SELECT:
56042   case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
56043   case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
56044   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
56045   case X86ISD::CMP:         return combineCMP(N, DAG, Subtarget);
56046   case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
56047   case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
56048   case X86ISD::ADD:
56049   case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
56050   case X86ISD::SBB:         return combineSBB(N, DAG);
56051   case X86ISD::ADC:         return combineADC(N, DAG, DCI);
56052   case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
56053   case ISD::SHL:            return combineShiftLeft(N, DAG);
56054   case ISD::SRA:            return combineShiftRightArithmetic(N, DAG, Subtarget);
56055   case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI, Subtarget);
56056   case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
56057   case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
56058   case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
56059   case X86ISD::BEXTR:
56060   case X86ISD::BEXTRI:      return combineBEXTR(N, DAG, DCI, Subtarget);
56061   case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
56062   case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
56063   case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
56064   case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
56065   case X86ISD::VEXTRACT_STORE:
56066     return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
56067   case ISD::SINT_TO_FP:
56068   case ISD::STRICT_SINT_TO_FP:
56069     return combineSIntToFP(N, DAG, DCI, Subtarget);
56070   case ISD::UINT_TO_FP:
56071   case ISD::STRICT_UINT_TO_FP:
56072     return combineUIntToFP(N, DAG, Subtarget);
56073   case ISD::FADD:
56074   case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
56075   case X86ISD::VFCMULC:
56076   case X86ISD::VFMULC:      return combineFMulcFCMulc(N, DAG, Subtarget);
56077   case ISD::FNEG:           return combineFneg(N, DAG, DCI, Subtarget);
56078   case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
56079   case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG, DCI);
56080   case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
56081   case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
56082   case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
56083   case X86ISD::FXOR:
56084   case X86ISD::FOR:         return combineFOr(N, DAG, DCI, Subtarget);
56085   case X86ISD::FMIN:
56086   case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
56087   case ISD::FMINNUM:
56088   case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
56089   case X86ISD::CVTSI2P:
56090   case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
56091   case X86ISD::CVTP2SI:
56092   case X86ISD::CVTP2UI:
56093   case X86ISD::STRICT_CVTTP2SI:
56094   case X86ISD::CVTTP2SI:
56095   case X86ISD::STRICT_CVTTP2UI:
56096   case X86ISD::CVTTP2UI:
56097                             return combineCVTP2I_CVTTP2I(N, DAG, DCI);
56098   case X86ISD::STRICT_CVTPH2PS:
56099   case X86ISD::CVTPH2PS:    return combineCVTPH2PS(N, DAG, DCI);
56100   case X86ISD::BT:          return combineBT(N, DAG, DCI);
56101   case ISD::ANY_EXTEND:
56102   case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
56103   case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
56104   case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
56105   case ISD::ANY_EXTEND_VECTOR_INREG:
56106   case ISD::SIGN_EXTEND_VECTOR_INREG:
56107   case ISD::ZERO_EXTEND_VECTOR_INREG:
56108     return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
56109   case ISD::SETCC:          return combineSetCC(N, DAG, DCI, Subtarget);
56110   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
56111   case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
56112   case X86ISD::PACKSS:
56113   case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
56114   case X86ISD::HADD:
56115   case X86ISD::HSUB:
56116   case X86ISD::FHADD:
56117   case X86ISD::FHSUB:       return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
56118   case X86ISD::VSHL:
56119   case X86ISD::VSRA:
56120   case X86ISD::VSRL:
56121     return combineVectorShiftVar(N, DAG, DCI, Subtarget);
56122   case X86ISD::VSHLI:
56123   case X86ISD::VSRAI:
56124   case X86ISD::VSRLI:
56125     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
56126   case ISD::INSERT_VECTOR_ELT:
56127   case X86ISD::PINSRB:
56128   case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
56129   case X86ISD::SHUFP:       // Handle all target specific shuffles
56130   case X86ISD::INSERTPS:
56131   case X86ISD::EXTRQI:
56132   case X86ISD::INSERTQI:
56133   case X86ISD::VALIGN:
56134   case X86ISD::PALIGNR:
56135   case X86ISD::VSHLDQ:
56136   case X86ISD::VSRLDQ:
56137   case X86ISD::BLENDI:
56138   case X86ISD::UNPCKH:
56139   case X86ISD::UNPCKL:
56140   case X86ISD::MOVHLPS:
56141   case X86ISD::MOVLHPS:
56142   case X86ISD::PSHUFB:
56143   case X86ISD::PSHUFD:
56144   case X86ISD::PSHUFHW:
56145   case X86ISD::PSHUFLW:
56146   case X86ISD::MOVSHDUP:
56147   case X86ISD::MOVSLDUP:
56148   case X86ISD::MOVDDUP:
56149   case X86ISD::MOVSS:
56150   case X86ISD::MOVSD:
56151   case X86ISD::MOVSH:
56152   case X86ISD::VBROADCAST:
56153   case X86ISD::VPPERM:
56154   case X86ISD::VPERMI:
56155   case X86ISD::VPERMV:
56156   case X86ISD::VPERMV3:
56157   case X86ISD::VPERMIL2:
56158   case X86ISD::VPERMILPI:
56159   case X86ISD::VPERMILPV:
56160   case X86ISD::VPERM2X128:
56161   case X86ISD::SHUF128:
56162   case X86ISD::VZEXT_MOVL:
56163   case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
56164   case X86ISD::FMADD_RND:
56165   case X86ISD::FMSUB:
56166   case X86ISD::STRICT_FMSUB:
56167   case X86ISD::FMSUB_RND:
56168   case X86ISD::FNMADD:
56169   case X86ISD::STRICT_FNMADD:
56170   case X86ISD::FNMADD_RND:
56171   case X86ISD::FNMSUB:
56172   case X86ISD::STRICT_FNMSUB:
56173   case X86ISD::FNMSUB_RND:
56174   case ISD::FMA:
56175   case ISD::STRICT_FMA:     return combineFMA(N, DAG, DCI, Subtarget);
56176   case X86ISD::FMADDSUB_RND:
56177   case X86ISD::FMSUBADD_RND:
56178   case X86ISD::FMADDSUB:
56179   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
56180   case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
56181   case X86ISD::TESTP:       return combineTESTP(N, DAG, DCI, Subtarget);
56182   case X86ISD::MGATHER:
56183   case X86ISD::MSCATTER:    return combineX86GatherScatter(N, DAG, DCI);
56184   case ISD::MGATHER:
56185   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
56186   case X86ISD::PCMPEQ:
56187   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
56188   case X86ISD::PMULDQ:
56189   case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
56190   case X86ISD::VPMADDUBSW:
56191   case X86ISD::VPMADDWD:    return combineVPMADD(N, DAG, DCI);
56192   case X86ISD::KSHIFTL:
56193   case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
56194   case ISD::FP16_TO_FP:     return combineFP16_TO_FP(N, DAG, Subtarget);
56195   case ISD::STRICT_FP_EXTEND:
56196   case ISD::FP_EXTEND:      return combineFP_EXTEND(N, DAG, Subtarget);
56197   case ISD::STRICT_FP_ROUND:
56198   case ISD::FP_ROUND:       return combineFP_ROUND(N, DAG, Subtarget);
56199   case X86ISD::VBROADCAST_LOAD:
56200   case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
56201   case X86ISD::MOVDQ2Q:     return combineMOVDQ2Q(N, DAG);
56202   case X86ISD::PDEP:        return combinePDEP(N, DAG, DCI);
56203   }
56204 
56205   return SDValue();
56206 }
56207 
56208 bool X86TargetLowering::preferABDSToABSWithNSW(EVT VT) const {
56209   return false;
56210 }
56211 
56212 // Prefer (non-AVX512) vector TRUNCATE(SIGN_EXTEND_INREG(X)) to use of PACKSS.
56213 bool X86TargetLowering::preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
56214                                                   EVT ExtVT) const {
56215   return Subtarget.hasAVX512() || !VT.isVector();
56216 }
56217 
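      // Return true if VT is a desirable type for the given opcode: reject vXi8
      // shl, i8 mul/shl and common i16 ops to avoid longer encodings and
      // partial-register stalls.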
56218 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
56219   if (!isTypeLegal(VT))
56220     return false;
56221 
56222   // There are no vXi8 shifts.
56223   if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
56224     return false;
56225 
56226   // TODO: Almost no 8-bit ops are desirable because they have no actual
56227   //       size/speed advantages vs. 32-bit ops, but they do have a major
56228   //       potential disadvantage by causing partial register stalls.
56229   //
56230   // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
56231   // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
56232   // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
56233   // check for a constant operand to the multiply.
56234   if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
56235     return false;
56236 
56237   // i16 instruction encodings are longer and some i16 instructions are slow,
56238   // so those are not desirable.
56239   if (VT == MVT::i16) {
56240     switch (Opc) {
56241     default:
56242       break;
56243     case ISD::LOAD:
56244     case ISD::SIGN_EXTEND:
56245     case ISD::ZERO_EXTEND:
56246     case ISD::ANY_EXTEND:
56247     case ISD::SHL:
56248     case ISD::SRA:
56249     case ISD::SRL:
56250     case ISD::SUB:
56251     case ISD::ADD:
56252     case ISD::MUL:
56253     case ISD::AND:
56254     case ISD::OR:
56255     case ISD::XOR:
56256       return false;
56257     }
56258   }
56259 
56260   // Any legal type not explicitly accounted for above here is desirable.
56261   // Any legal type not explicitly accounted for above is desirable.
56262 }
56263 
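      // Expand an indirect jump-table branch. When the "cf-protection-branch"
      // module flag is set, emit an X86ISD::NT_BRIND node so the indirect jump is
      // selected with a NOTRACK prefix.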
56264 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
56265                                                   SDValue Value, SDValue Addr,
56266                                                   int JTI,
56267                                                   SelectionDAG &DAG) const {
56268   const Module *M = DAG.getMachineFunction().getMMI().getModule();
56269   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
56270   if (IsCFProtectionSupported) {
56271     // When control-flow branch protection is enabled, we need to add a
56272     // notrack prefix to the indirect branch.
56273     // To do that we create an NT_BRIND SDNode; upon ISel, the pattern will
56274     // convert it to a jmp with a NoTrack prefix.
56275     SDValue JTInfo = DAG.getJumpTableDebugInfo(JTI, Value, dl);
56276     return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, JTInfo, Addr);
56277   }
56278 
56279   return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, JTI, DAG);
56280 }
56281 
56282 TargetLowering::AndOrSETCCFoldKind
56283 X86TargetLowering::isDesirableToCombineLogicOpOfSETCC(
56284     const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
56285   using AndOrSETCCFoldKind = TargetLowering::AndOrSETCCFoldKind;
56286   EVT VT = LogicOp->getValueType(0);
56287   EVT OpVT = SETCC0->getOperand(0).getValueType();
56288   if (!VT.isInteger())
56289     return AndOrSETCCFoldKind::None;
56290 
56291   if (VT.isVector())
56292     return AndOrSETCCFoldKind(AndOrSETCCFoldKind::NotAnd |
56293                               (isOperationLegal(ISD::ABS, OpVT)
56294                                    ? AndOrSETCCFoldKind::ABS
56295                                    : AndOrSETCCFoldKind::None));
56296 
56297   // Don't use `NotAnd`: even though `not` generally has smaller code size than
56298   // `add`, `add` can lower to LEA, which can save moves / spills. In any case
56299   // where `NotAnd` applies, `AddAnd` does as well.
56300   // TODO: Currently we lower (icmp eq/ne (and ~X, Y), 0) -> `test (not X), Y`;
56301   // if we change that to `andn Y, X` it may be worth preferring `NotAnd` here.
56302   return AndOrSETCCFoldKind::AddAnd;
56303 }
56304 
56305 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
56306   EVT VT = Op.getValueType();
56307   bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
56308                              isa<ConstantSDNode>(Op.getOperand(1));
56309 
56310   // i16 is legal, but undesirable since i16 instruction encodings are longer
56311   // and some i16 instructions are slow.
56312   // 8-bit multiply-by-constant can usually be expanded to something cheaper
56313   // using LEA and/or other ALU ops.
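  // For instance (sketch), after promoting to i32 a multiply by 5 can be
  // lowered as "leal (%rax,%rax,4), %eax" instead of an 8-bit imul.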
56314   if (VT != MVT::i16 && !Is8BitMulByConstant)
56315     return false;
56316 
56317   auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
56318     if (!Op.hasOneUse())
56319       return false;
56320     SDNode *User = *Op->use_begin();
56321     if (!ISD::isNormalStore(User))
56322       return false;
56323     auto *Ld = cast<LoadSDNode>(Load);
56324     auto *St = cast<StoreSDNode>(User);
56325     return Ld->getBasePtr() == St->getBasePtr();
56326   };
56327 
56328   auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
56329     if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
56330       return false;
56331     if (!Op.hasOneUse())
56332       return false;
56333     SDNode *User = *Op->use_begin();
56334     if (User->getOpcode() != ISD::ATOMIC_STORE)
56335       return false;
56336     auto *Ld = cast<AtomicSDNode>(Load);
56337     auto *St = cast<AtomicSDNode>(User);
56338     return Ld->getBasePtr() == St->getBasePtr();
56339   };
56340 
56341   bool Commute = false;
56342   switch (Op.getOpcode()) {
56343   default: return false;
56344   case ISD::SIGN_EXTEND:
56345   case ISD::ZERO_EXTEND:
56346   case ISD::ANY_EXTEND:
56347     break;
56348   case ISD::SHL:
56349   case ISD::SRA:
56350   case ISD::SRL: {
56351     SDValue N0 = Op.getOperand(0);
56352     // Look out for (store (shl (load), x)).
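    // e.g. an in-memory "shlw $3, (%rdi)" is a single RMW instruction;
    // promoting the i16 shift to i32 would prevent folding the load and store
    // back into it.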
56353     if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
56354       return false;
56355     break;
56356   }
56357   case ISD::ADD:
56358   case ISD::MUL:
56359   case ISD::AND:
56360   case ISD::OR:
56361   case ISD::XOR:
56362     Commute = true;
56363     [[fallthrough]];
56364   case ISD::SUB: {
56365     SDValue N0 = Op.getOperand(0);
56366     SDValue N1 = Op.getOperand(1);
56367     // Avoid disabling potential load folding opportunities.
56368     if (X86::mayFoldLoad(N1, Subtarget) &&
56369         (!Commute || !isa<ConstantSDNode>(N0) ||
56370          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
56371       return false;
56372     if (X86::mayFoldLoad(N0, Subtarget) &&
56373         ((Commute && !isa<ConstantSDNode>(N1)) ||
56374          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
56375       return false;
56376     if (IsFoldableAtomicRMW(N0, Op) ||
56377         (Commute && IsFoldableAtomicRMW(N1, Op)))
56378       return false;
56379   }
56380   }
56381 
56382   PVT = MVT::i32;
56383   return true;
56384 }
56385 
56386 //===----------------------------------------------------------------------===//
56387 //                           X86 Inline Assembly Support
56388 //===----------------------------------------------------------------------===//
56389 
56390 // Helper to match a string against a sequence of pieces, ignoring whitespace.
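// For example (illustrative), matchAsm("bswap $0", {"bswap", "$0"}) and
// matchAsm("  bswapl   $0", {"bswapl", "$0"}) both return true, while
// matchAsm("bswapl $0", {"bswap", "$0"}) fails because "bswap" only matches a
// prefix of the "bswapl" token.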
56391 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
56392   S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
56393 
56394   for (StringRef Piece : Pieces) {
56395     if (!S.starts_with(Piece)) // Check if the piece matches.
56396       return false;
56397 
56398     S = S.substr(Piece.size());
56399     StringRef::size_type Pos = S.find_first_not_of(" \t");
56400     if (Pos == 0) // We matched a prefix.
56401     if (Pos == 0) // Piece only matched a prefix of a longer token.
56402 
56403     S = S.substr(Pos);
56404   }
56405 
56406   return S.empty();
56407 }
56408 
56409 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
56410 
56411   if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
56412     if (llvm::is_contained(AsmPieces, "~{cc}") &&
56413         llvm::is_contained(AsmPieces, "~{flags}") &&
56414         llvm::is_contained(AsmPieces, "~{fpsr}")) {
56415 
56416       if (AsmPieces.size() == 3)
56417         return true;
56418       else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
56419         return true;
56420     }
56421   }
56422   return false;
56423 }
56424 
56425 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
56426   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
56427 
56428   const std::string &AsmStr = IA->getAsmString();
56429 
56430   IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
56431   if (!Ty || Ty->getBitWidth() % 16 != 0)
56432     return false;
56433 
56434   // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
56435   SmallVector<StringRef, 4> AsmPieces;
56436   SplitString(AsmStr, AsmPieces, ";\n");
56437 
56438   switch (AsmPieces.size()) {
56439   default: return false;
56440   case 1:
56441     // FIXME: this should verify that we are targeting a 486 or better.  If not,
56442     // we will turn this bswap into something that will be lowered to logical
56443     // ops instead of emitting the bswap asm.  For now, we don't support 486 or
56444     // lower so don't worry about this.
56445     // bswap $0
56446     if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
56447         matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
56448         matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
56449         matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
56450         matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
56451         matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
56452       // No need to check constraints; nothing other than the equivalent of
56453       // "=r,0" would be valid here.
56454       return IntrinsicLowering::LowerToByteSwap(CI);
56455     }
56456 
56457     // rorw $$8, ${0:w}  -->  llvm.bswap.i16
56458     if (CI->getType()->isIntegerTy(16) &&
56459         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56460         (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
56461          matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
56462       AsmPieces.clear();
56463       StringRef ConstraintsStr = IA->getConstraintString();
56464       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56465       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56466       if (clobbersFlagRegisters(AsmPieces))
56467         return IntrinsicLowering::LowerToByteSwap(CI);
56468     }
56469     break;
56470   case 3:
56471     if (CI->getType()->isIntegerTy(32) &&
56472         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56473         matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
56474         matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
56475         matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
56476       AsmPieces.clear();
56477       StringRef ConstraintsStr = IA->getConstraintString();
56478       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56479       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56480       if (clobbersFlagRegisters(AsmPieces))
56481         return IntrinsicLowering::LowerToByteSwap(CI);
56482     }
56483 
56484     if (CI->getType()->isIntegerTy(64)) {
56485       InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
56486       if (Constraints.size() >= 2 &&
56487           Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
56488           Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
56489         // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
56490         if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
56491             matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
56492             matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
56493           return IntrinsicLowering::LowerToByteSwap(CI);
56494       }
56495     }
56496     break;
56497   }
56498   return false;
56499 }
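// Rough illustration of the transform above: an IR call such as
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// is rewritten by LowerToByteSwap into
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// so the generic BSWAP lowering takes over.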
56500 
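// Map an inline-asm flag-output constraint string (e.g. "{@ccz}") to the
// corresponding X86 condition code, or COND_INVALID if it is not one.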
56501 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
56502   X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
56503                            .Case("{@cca}", X86::COND_A)
56504                            .Case("{@ccae}", X86::COND_AE)
56505                            .Case("{@ccb}", X86::COND_B)
56506                            .Case("{@ccbe}", X86::COND_BE)
56507                            .Case("{@ccc}", X86::COND_B)
56508                            .Case("{@cce}", X86::COND_E)
56509                            .Case("{@ccz}", X86::COND_E)
56510                            .Case("{@ccg}", X86::COND_G)
56511                            .Case("{@ccge}", X86::COND_GE)
56512                            .Case("{@ccl}", X86::COND_L)
56513                            .Case("{@ccle}", X86::COND_LE)
56514                            .Case("{@ccna}", X86::COND_BE)
56515                            .Case("{@ccnae}", X86::COND_B)
56516                            .Case("{@ccnb}", X86::COND_AE)
56517                            .Case("{@ccnbe}", X86::COND_A)
56518                            .Case("{@ccnc}", X86::COND_AE)
56519                            .Case("{@ccne}", X86::COND_NE)
56520                            .Case("{@ccnz}", X86::COND_NE)
56521                            .Case("{@ccng}", X86::COND_LE)
56522                            .Case("{@ccnge}", X86::COND_L)
56523                            .Case("{@ccnl}", X86::COND_GE)
56524                            .Case("{@ccnle}", X86::COND_G)
56525                            .Case("{@ccno}", X86::COND_NO)
56526                            .Case("{@ccnp}", X86::COND_NP)
56527                            .Case("{@ccns}", X86::COND_NS)
56528                            .Case("{@cco}", X86::COND_O)
56529                            .Case("{@ccp}", X86::COND_P)
56530                            .Case("{@ccs}", X86::COND_S)
56531                            .Default(X86::COND_INVALID);
56532   return Cond;
56533 }
56534 
56535 /// Given a constraint letter, return the type of constraint for this target.
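/// For example (per the switch below): 'x' is a register class (SSE/AVX
/// registers), 'a' is a specific register (the [ER]AX family), and 'I' is an
/// immediate constraint.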
56536 X86TargetLowering::ConstraintType
56537 X86TargetLowering::getConstraintType(StringRef Constraint) const {
56538   if (Constraint.size() == 1) {
56539     switch (Constraint[0]) {
56540     case 'R':
56541     case 'q':
56542     case 'Q':
56543     case 'f':
56544     case 't':
56545     case 'u':
56546     case 'y':
56547     case 'x':
56548     case 'v':
56549     case 'l':
56550     case 'k': // AVX512 masking registers.
56551       return C_RegisterClass;
56552     case 'a':
56553     case 'b':
56554     case 'c':
56555     case 'd':
56556     case 'S':
56557     case 'D':
56558     case 'A':
56559       return C_Register;
56560     case 'I':
56561     case 'J':
56562     case 'K':
56563     case 'N':
56564     case 'G':
56565     case 'L':
56566     case 'M':
56567       return C_Immediate;
56568     case 'C':
56569     case 'e':
56570     case 'Z':
56571       return C_Other;
56572     default:
56573       break;
56574     }
56575   }
56576   else if (Constraint.size() == 2) {
56577     switch (Constraint[0]) {
56578     default:
56579       break;
56580     case 'Y':
56581       switch (Constraint[1]) {
56582       default:
56583         break;
56584       case 'z':
56585         return C_Register;
56586       case 'i':
56587       case 'm':
56588       case 'k':
56589       case 't':
56590       case '2':
56591         return C_RegisterClass;
56592       }
56593     }
56594   } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
56595     return C_Other;
56596   return TargetLowering::getConstraintType(Constraint);
56597 }
56598 
56599 /// Examine constraint type and operand type and determine a weight value.
56600 /// This object must already have been set up with the operand type
56601 /// and the current alternative constraint selected.
56602 TargetLowering::ConstraintWeight
56603 X86TargetLowering::getSingleConstraintMatchWeight(
56604     AsmOperandInfo &Info, const char *Constraint) const {
56605   ConstraintWeight Wt = CW_Invalid;
56606   Value *CallOperandVal = Info.CallOperandVal;
56607   // If we don't have a value, we can't do a match,
56608   // but allow it at the lowest weight.
56609   if (!CallOperandVal)
56610     return CW_Default;
56611   Type *Ty = CallOperandVal->getType();
56612   // Look at the constraint type.
56613   switch (*Constraint) {
56614   default:
56615     Wt = TargetLowering::getSingleConstraintMatchWeight(Info, Constraint);
56616     [[fallthrough]];
56617   case 'R':
56618   case 'q':
56619   case 'Q':
56620   case 'a':
56621   case 'b':
56622   case 'c':
56623   case 'd':
56624   case 'S':
56625   case 'D':
56626   case 'A':
56627     if (CallOperandVal->getType()->isIntegerTy())
56628       Wt = CW_SpecificReg;
56629     break;
56630   case 'f':
56631   case 't':
56632   case 'u':
56633     if (Ty->isFloatingPointTy())
56634       Wt = CW_SpecificReg;
56635     break;
56636   case 'y':
56637     if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56638       Wt = CW_SpecificReg;
56639     break;
56640   case 'Y':
56641     if (StringRef(Constraint).size() != 2)
56642       break;
56643     switch (Constraint[1]) {
56644     default:
56645       return CW_Invalid;
56646     // XMM0
56647     case 'z':
56648       if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56649           ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
56650           ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
56651         return CW_SpecificReg;
56652       return CW_Invalid;
56653     // Conditional OpMask regs (AVX512)
56654     case 'k':
56655       if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56656         return CW_Register;
56657       return CW_Invalid;
56658     // Any MMX reg
56659     case 'm':
56660       if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56661         return Wt;
56662       return CW_Invalid;
56663     // Any SSE reg when ISA >= SSE2, same as 'x'
56664     case 'i':
56665     case 't':
56666     case '2':
56667       if (!Subtarget.hasSSE2())
56668         return CW_Invalid;
56669       break;
56670     }
56671     break;
56672   case 'v':
56673     if ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
56674       Wt = CW_Register;
56675     [[fallthrough]];
56676   case 'x':
56677     if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56678         ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
56679       Wt = CW_Register;
56680     break;
56681   case 'k':
56682     // Enable conditional vector operations using %k<#> registers.
56683     if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56684       Wt = CW_Register;
56685     break;
56686   case 'I':
56687     if (auto *C = dyn_cast<ConstantInt>(Info.CallOperandVal))
56688       if (C->getZExtValue() <= 31)
56689         Wt = CW_Constant;
56690     break;
56691   case 'J':
56692     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56693       if (C->getZExtValue() <= 63)
56694         Wt = CW_Constant;
56695     break;
56696   case 'K':
56697     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56698       if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
56699         Wt = CW_Constant;
56700     break;
56701   case 'L':
56702     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56703       if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
56704         Wt = CW_Constant;
56705     break;
56706   case 'M':
56707     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56708       if (C->getZExtValue() <= 3)
56709         Wt = CW_Constant;
56710     break;
56711   case 'N':
56712     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56713       if (C->getZExtValue() <= 0xff)
56714         Wt = CW_Constant;
56715     break;
56716   case 'G':
56717   case 'C':
56718     if (isa<ConstantFP>(CallOperandVal))
56719       Wt = CW_Constant;
56720     break;
56721   case 'e':
56722     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56723       if ((C->getSExtValue() >= -0x80000000LL) &&
56724           (C->getSExtValue() <= 0x7fffffffLL))
56725         Wt = CW_Constant;
56726     break;
56727   case 'Z':
56728     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56729       if (C->getZExtValue() <= 0xffffffff)
56730         Wt = CW_Constant;
56731     break;
56732   }
56733   return Wt;
56734 }
56735 
56736 /// Try to replace an X constraint, which matches anything, with another that
56737 /// has more specific requirements based on the type of the corresponding
56738 /// operand.
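/// For example, an "X" constraint on a floating-point operand is narrowed to
/// "x" (an SSE register) when SSE1 is available, and otherwise falls back to
/// the generic handling.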
56739 const char *X86TargetLowering::
56740 LowerXConstraint(EVT ConstraintVT) const {
56741   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
56742   // 'f' like normal targets.
56743   if (ConstraintVT.isFloatingPoint()) {
56744     if (Subtarget.hasSSE1())
56745       return "x";
56746   }
56747 
56748   return TargetLowering::LowerXConstraint(ConstraintVT);
56749 }
56750 
56751 // Lower @cc targets via setcc.
56752 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
56753     SDValue &Chain, SDValue &Glue, const SDLoc &DL,
56754     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
56755   X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
56756   if (Cond == X86::COND_INVALID)
56757     return SDValue();
56758   // Check that return type is valid.
56759   if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
56760       OpInfo.ConstraintVT.getSizeInBits() < 8)
56761     report_fatal_error("Glue output operand is of invalid type");
56762 
56763   // Get EFLAGS register. Only update chain when copyfrom is glued.
56764   if (Glue.getNode()) {
56765     Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Glue);
56766     Chain = Glue.getValue(1);
56767   } else
56768     Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
56769   // Extract CC code.
56770   SDValue CC = getSETCC(Cond, Glue, DL, DAG);
56771   // Extend the CC result to the constraint's return type.
56772   SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
56773 
56774   return Result;
56775 }
56776 
56777 /// Lower the specified operand into the Ops vector.
56778 /// If it is invalid, don't add anything to Ops.
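/// For example, with the 'N' constraint (an 8-bit unsigned immediate) a
/// constant operand of 200 is accepted, while 300 is rejected and nothing is
/// added to Ops.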
56779 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
56780                                                      StringRef Constraint,
56781                                                      std::vector<SDValue> &Ops,
56782                                                      SelectionDAG &DAG) const {
56783   SDValue Result;
56784 
56785   // Only support length 1 constraints for now.
56786   if (Constraint.size() > 1)
56787     return;
56788 
56789   char ConstraintLetter = Constraint[0];
56790   switch (ConstraintLetter) {
56791   default: break;
56792   case 'I':
56793     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56794       if (C->getZExtValue() <= 31) {
56795         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56796                                        Op.getValueType());
56797         break;
56798       }
56799     }
56800     return;
56801   case 'J':
56802     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56803       if (C->getZExtValue() <= 63) {
56804         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56805                                        Op.getValueType());
56806         break;
56807       }
56808     }
56809     return;
56810   case 'K':
56811     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56812       if (isInt<8>(C->getSExtValue())) {
56813         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56814                                        Op.getValueType());
56815         break;
56816       }
56817     }
56818     return;
56819   case 'L':
56820     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56821       if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
56822           (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
56823         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
56824                                        Op.getValueType());
56825         break;
56826       }
56827     }
56828     return;
56829   case 'M':
56830     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56831       if (C->getZExtValue() <= 3) {
56832         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56833                                        Op.getValueType());
56834         break;
56835       }
56836     }
56837     return;
56838   case 'N':
56839     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56840       if (C->getZExtValue() <= 255) {
56841         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56842                                        Op.getValueType());
56843         break;
56844       }
56845     }
56846     return;
56847   case 'O':
56848     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56849       if (C->getZExtValue() <= 127) {
56850         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56851                                        Op.getValueType());
56852         break;
56853       }
56854     }
56855     return;
56856   case 'e': {
56857     // 32-bit signed value
56858     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56859       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
56860                                            C->getSExtValue())) {
56861         // Widen to 64 bits here to get it sign extended.
56862         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
56863         break;
56864       }
56865     // FIXME gcc accepts some relocatable values here too, but only in certain
56866     // memory models; it's complicated.
56867     }
56868     return;
56869   }
56870   case 'Z': {
56871     // 32-bit unsigned value
56872     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56873       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
56874                                            C->getZExtValue())) {
56875         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56876                                        Op.getValueType());
56877         break;
56878       }
56879     }
56880     // FIXME gcc accepts some relocatable values here too, but only in certain
56881     // memory models; it's complicated.
56882     return;
56883   }
56884   case 'i': {
56885     // Literal immediates are always ok.
56886     if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
56887       bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
56888       BooleanContent BCont = getBooleanContents(MVT::i64);
56889       ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
56890                                     : ISD::SIGN_EXTEND;
56891       int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
56892                                                   : CST->getSExtValue();
56893       Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
56894       break;
56895     }
56896 
56897     // In any sort of PIC mode addresses need to be computed at runtime by
56898     // adding in a register or some sort of table lookup.  These can't
56899     // be used as immediates. BlockAddresses and BasicBlocks are fine though.
56900     if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
56901         !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
56902       return;
56903 
56904     // If we are in non-pic codegen mode, we allow the address of a global (with
56905     // an optional displacement) to be used with 'i'.
56906     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
56907       // If we require an extra load to get this address, as in PIC mode, we
56908       // can't accept it.
56909       if (isGlobalStubReference(
56910               Subtarget.classifyGlobalReference(GA->getGlobal())))
56911         return;
56912     break;
56913   }
56914   }
56915 
56916   if (Result.getNode()) {
56917     Ops.push_back(Result);
56918     return;
56919   }
56920   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
56921 }
56922 
56923 /// Check if \p RC is a general purpose register class.
56924 /// I.e., GR* or one of their variants.
56925 static bool isGRClass(const TargetRegisterClass &RC) {
56926   return RC.hasSuperClassEq(&X86::GR8RegClass) ||
56927          RC.hasSuperClassEq(&X86::GR16RegClass) ||
56928          RC.hasSuperClassEq(&X86::GR32RegClass) ||
56929          RC.hasSuperClassEq(&X86::GR64RegClass) ||
56930          RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
56931 }
56932 
56933 /// Check if \p RC is a vector register class.
56934 /// I.e., FR* / VR* or one of their variants.
56935 static bool isFRClass(const TargetRegisterClass &RC) {
56936   return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
56937          RC.hasSuperClassEq(&X86::FR32XRegClass) ||
56938          RC.hasSuperClassEq(&X86::FR64XRegClass) ||
56939          RC.hasSuperClassEq(&X86::VR128XRegClass) ||
56940          RC.hasSuperClassEq(&X86::VR256XRegClass) ||
56941          RC.hasSuperClassEq(&X86::VR512RegClass);
56942 }
56943 
56944 /// Check if \p RC is a mask register class.
56945 /// I.e., VK* or one of their variants.
56946 static bool isVKClass(const TargetRegisterClass &RC) {
56947   return RC.hasSuperClassEq(&X86::VK1RegClass) ||
56948          RC.hasSuperClassEq(&X86::VK2RegClass) ||
56949          RC.hasSuperClassEq(&X86::VK4RegClass) ||
56950          RC.hasSuperClassEq(&X86::VK8RegClass) ||
56951          RC.hasSuperClassEq(&X86::VK16RegClass) ||
56952          RC.hasSuperClassEq(&X86::VK32RegClass) ||
56953          RC.hasSuperClassEq(&X86::VK64RegClass);
56954 }
56955 
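// For example (per the tables below): the "x" constraint with MVT::v4f32 maps
// to the VR128 class, "k" with MVT::i16 maps to VK16 when AVX512 is available,
// and "{st(3)}" maps to the fixed x87 register FP3.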
56956 std::pair<unsigned, const TargetRegisterClass *>
56957 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
56958                                                 StringRef Constraint,
56959                                                 MVT VT) const {
56960   // First, see if this is a constraint that directly corresponds to an LLVM
56961   // register class.
56962   if (Constraint.size() == 1) {
56963     // GCC Constraint Letters
56964     switch (Constraint[0]) {
56965     default: break;
56966     // 'A' means [ER]AX + [ER]DX.
56967     case 'A':
56968       if (Subtarget.is64Bit())
56969         return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
56970       assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
56971              "Expecting 64, 32 or 16 bit subtarget");
56972       return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
56973 
56974       // TODO: Slight differences here in allocation order and leaving
56975       // RIP in the class. Do they matter any more here than they do
56976       // in the normal allocation?
56977     case 'k':
56978       if (Subtarget.hasAVX512()) {
56979         if (VT == MVT::i1)
56980           return std::make_pair(0U, &X86::VK1RegClass);
56981         if (VT == MVT::i8)
56982           return std::make_pair(0U, &X86::VK8RegClass);
56983         if (VT == MVT::i16)
56984           return std::make_pair(0U, &X86::VK16RegClass);
56985       }
56986       if (Subtarget.hasBWI()) {
56987         if (VT == MVT::i32)
56988           return std::make_pair(0U, &X86::VK32RegClass);
56989         if (VT == MVT::i64)
56990           return std::make_pair(0U, &X86::VK64RegClass);
56991       }
56992       break;
56993     case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
56994       if (Subtarget.is64Bit()) {
56995         if (VT == MVT::i8 || VT == MVT::i1)
56996           return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
56997         if (VT == MVT::i16)
56998           return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
56999         if (VT == MVT::i32 || VT == MVT::f32)
57000           return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57001         if (VT != MVT::f80 && !VT.isVector())
57002           return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57003         break;
57004       }
57005       [[fallthrough]];
57006       // 32-bit fallthrough
57007     case 'Q':   // Q_REGS
57008       if (VT == MVT::i8 || VT == MVT::i1)
57009         return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
57010       if (VT == MVT::i16)
57011         return std::make_pair(0U, &X86::GR16_ABCDRegClass);
57012       if (VT == MVT::i32 || VT == MVT::f32 ||
57013           (!VT.isVector() && !Subtarget.is64Bit()))
57014         return std::make_pair(0U, &X86::GR32_ABCDRegClass);
57015       if (VT != MVT::f80 && !VT.isVector())
57016         return std::make_pair(0U, &X86::GR64_ABCDRegClass);
57017       break;
57018     case 'r':   // GENERAL_REGS
57019     case 'l':   // INDEX_REGS
57020       if (VT == MVT::i8 || VT == MVT::i1)
57021         return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
57022       if (VT == MVT::i16)
57023         return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
57024       if (VT == MVT::i32 || VT == MVT::f32 ||
57025           (!VT.isVector() && !Subtarget.is64Bit()))
57026         return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57027       if (VT != MVT::f80 && !VT.isVector())
57028         return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57029       break;
57030     case 'R':   // LEGACY_REGS
57031       if (VT == MVT::i8 || VT == MVT::i1)
57032         return std::make_pair(0U, &X86::GR8_NOREXRegClass);
57033       if (VT == MVT::i16)
57034         return std::make_pair(0U, &X86::GR16_NOREXRegClass);
57035       if (VT == MVT::i32 || VT == MVT::f32 ||
57036           (!VT.isVector() && !Subtarget.is64Bit()))
57037         return std::make_pair(0U, &X86::GR32_NOREXRegClass);
57038       if (VT != MVT::f80 && !VT.isVector())
57039         return std::make_pair(0U, &X86::GR64_NOREXRegClass);
57040       break;
57041     case 'f':  // FP Stack registers.
57042       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
57043       // value to the correct fpstack register class.
57044       if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
57045         return std::make_pair(0U, &X86::RFP32RegClass);
57046       if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
57047         return std::make_pair(0U, &X86::RFP64RegClass);
57048       if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
57049         return std::make_pair(0U, &X86::RFP80RegClass);
57050       break;
57051     case 'y':   // MMX_REGS if MMX allowed.
57052       if (!Subtarget.hasMMX()) break;
57053       return std::make_pair(0U, &X86::VR64RegClass);
57054     case 'v':
57055     case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
57056       if (!Subtarget.hasSSE1()) break;
57057       bool VConstraint = (Constraint[0] == 'v');
57058 
57059       switch (VT.SimpleTy) {
57060       default: break;
57061       // Scalar SSE types.
57062       case MVT::f16:
57063         if (VConstraint && Subtarget.hasFP16())
57064           return std::make_pair(0U, &X86::FR16XRegClass);
57065         break;
57066       case MVT::f32:
57067       case MVT::i32:
57068         if (VConstraint && Subtarget.hasVLX())
57069           return std::make_pair(0U, &X86::FR32XRegClass);
57070         return std::make_pair(0U, &X86::FR32RegClass);
57071       case MVT::f64:
57072       case MVT::i64:
57073         if (VConstraint && Subtarget.hasVLX())
57074           return std::make_pair(0U, &X86::FR64XRegClass);
57075         return std::make_pair(0U, &X86::FR64RegClass);
57076       case MVT::i128:
57077         if (Subtarget.is64Bit()) {
57078           if (VConstraint && Subtarget.hasVLX())
57079             return std::make_pair(0U, &X86::VR128XRegClass);
57080           return std::make_pair(0U, &X86::VR128RegClass);
57081         }
57082         break;
57083       // Vector types and fp128.
57084       case MVT::v8f16:
57085         if (!Subtarget.hasFP16())
57086           break;
57087         if (VConstraint)
57088           return std::make_pair(0U, &X86::VR128XRegClass);
57089         return std::make_pair(0U, &X86::VR128RegClass);
57090       case MVT::v8bf16:
57091         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57092           break;
57093         if (VConstraint)
57094           return std::make_pair(0U, &X86::VR128XRegClass);
57095         return std::make_pair(0U, &X86::VR128RegClass);
57096       case MVT::f128:
57097       case MVT::v16i8:
57098       case MVT::v8i16:
57099       case MVT::v4i32:
57100       case MVT::v2i64:
57101       case MVT::v4f32:
57102       case MVT::v2f64:
57103         if (VConstraint && Subtarget.hasVLX())
57104           return std::make_pair(0U, &X86::VR128XRegClass);
57105         return std::make_pair(0U, &X86::VR128RegClass);
57106       // AVX types.
57107       case MVT::v16f16:
57108         if (!Subtarget.hasFP16())
57109           break;
57110         if (VConstraint)
57111           return std::make_pair(0U, &X86::VR256XRegClass);
57112         return std::make_pair(0U, &X86::VR256RegClass);
57113       case MVT::v16bf16:
57114         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57115           break;
57116         if (VConstraint)
57117           return std::make_pair(0U, &X86::VR256XRegClass);
57118         return std::make_pair(0U, &X86::VR256RegClass);
57119       case MVT::v32i8:
57120       case MVT::v16i16:
57121       case MVT::v8i32:
57122       case MVT::v4i64:
57123       case MVT::v8f32:
57124       case MVT::v4f64:
57125         if (VConstraint && Subtarget.hasVLX())
57126           return std::make_pair(0U, &X86::VR256XRegClass);
57127         if (Subtarget.hasAVX())
57128           return std::make_pair(0U, &X86::VR256RegClass);
57129         break;
57130       case MVT::v32f16:
57131         if (!Subtarget.hasFP16())
57132           break;
57133         if (VConstraint)
57134           return std::make_pair(0U, &X86::VR512RegClass);
57135         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57136       case MVT::v32bf16:
57137         if (!Subtarget.hasBF16())
57138           break;
57139         if (VConstraint)
57140           return std::make_pair(0U, &X86::VR512RegClass);
57141         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57142       case MVT::v64i8:
57143       case MVT::v32i16:
57144       case MVT::v8f64:
57145       case MVT::v16f32:
57146       case MVT::v16i32:
57147       case MVT::v8i64:
57148         if (!Subtarget.hasAVX512()) break;
57149         if (VConstraint)
57150           return std::make_pair(0U, &X86::VR512RegClass);
57151         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57152       }
57153       break;
57154     }
57155   } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
57156     switch (Constraint[1]) {
57157     default:
57158       break;
57159     case 'i':
57160     case 't':
57161     case '2':
57162       return getRegForInlineAsmConstraint(TRI, "x", VT);
57163     case 'm':
57164       if (!Subtarget.hasMMX()) break;
57165       return std::make_pair(0U, &X86::VR64RegClass);
57166     case 'z':
57167       if (!Subtarget.hasSSE1()) break;
57168       switch (VT.SimpleTy) {
57169       default: break;
57170       // Scalar SSE types.
57171       case MVT::f16:
57172         if (!Subtarget.hasFP16())
57173           break;
57174         return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
57175       case MVT::f32:
57176       case MVT::i32:
57177         return std::make_pair(X86::XMM0, &X86::FR32RegClass);
57178       case MVT::f64:
57179       case MVT::i64:
57180         return std::make_pair(X86::XMM0, &X86::FR64RegClass);
57181       case MVT::v8f16:
57182         if (!Subtarget.hasFP16())
57183           break;
57184         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57185       case MVT::v8bf16:
57186         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57187           break;
57188         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57189       case MVT::f128:
57190       case MVT::v16i8:
57191       case MVT::v8i16:
57192       case MVT::v4i32:
57193       case MVT::v2i64:
57194       case MVT::v4f32:
57195       case MVT::v2f64:
57196         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57197       // AVX types.
57198       case MVT::v16f16:
57199         if (!Subtarget.hasFP16())
57200           break;
57201         return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57202       case MVT::v16bf16:
57203         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57204           break;
57205         return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57206       case MVT::v32i8:
57207       case MVT::v16i16:
57208       case MVT::v8i32:
57209       case MVT::v4i64:
57210       case MVT::v8f32:
57211       case MVT::v4f64:
57212         if (Subtarget.hasAVX())
57213           return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57214         break;
57215       case MVT::v32f16:
57216         if (!Subtarget.hasFP16())
57217           break;
57218         return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57219       case MVT::v32bf16:
57220         if (!Subtarget.hasBF16())
57221           break;
57222         return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57223       case MVT::v64i8:
57224       case MVT::v32i16:
57225       case MVT::v8f64:
57226       case MVT::v16f32:
57227       case MVT::v16i32:
57228       case MVT::v8i64:
57229         if (Subtarget.hasAVX512())
57230           return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57231         break;
57232       }
57233       break;
57234     case 'k':
57235       // This register class doesn't allocate k0 for masked vector operations.
57236       if (Subtarget.hasAVX512()) {
57237         if (VT == MVT::i1)
57238           return std::make_pair(0U, &X86::VK1WMRegClass);
57239         if (VT == MVT::i8)
57240           return std::make_pair(0U, &X86::VK8WMRegClass);
57241         if (VT == MVT::i16)
57242           return std::make_pair(0U, &X86::VK16WMRegClass);
57243       }
57244       if (Subtarget.hasBWI()) {
57245         if (VT == MVT::i32)
57246           return std::make_pair(0U, &X86::VK32WMRegClass);
57247         if (VT == MVT::i64)
57248           return std::make_pair(0U, &X86::VK64WMRegClass);
57249       }
57250       break;
57251     }
57252   }
57253 
57254   if (parseConstraintCode(Constraint) != X86::COND_INVALID)
57255     return std::make_pair(0U, &X86::GR32RegClass);
57256 
57257   // Use the default implementation in TargetLowering to convert the register
57258   // constraint into a member of a register class.
57259   std::pair<Register, const TargetRegisterClass*> Res;
57260   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
57261 
57262   // Not found as a standard register?
57263   if (!Res.second) {
57264     // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
57265     // to/from f80.
57266     if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
57267       // Map st(0) -> st(7) -> ST0
57268       if (Constraint.size() == 7 && Constraint[0] == '{' &&
57269           tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
57270           Constraint[3] == '(' &&
57271           (Constraint[4] >= '0' && Constraint[4] <= '7') &&
57272           Constraint[5] == ')' && Constraint[6] == '}') {
57273         // st(7) is not allocatable and thus not a member of RFP80. Return a
57274         // singleton class in cases where we have a reference to it.
57275         if (Constraint[4] == '7')
57276           return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
57277         return std::make_pair(X86::FP0 + Constraint[4] - '0',
57278                               &X86::RFP80RegClass);
57279       }
57280 
57281       // GCC allows "st(0)" to be called just plain "st".
57282       if (StringRef("{st}").equals_insensitive(Constraint))
57283         return std::make_pair(X86::FP0, &X86::RFP80RegClass);
57284     }
57285 
57286     // flags -> EFLAGS
57287     if (StringRef("{flags}").equals_insensitive(Constraint))
57288       return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
57289 
57290     // dirflag -> DF
57291     // Only allow for clobber.
57292     if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
57293         VT == MVT::Other)
57294       return std::make_pair(X86::DF, &X86::DFCCRRegClass);
57295 
57296     // fpsr -> FPSW
57297     // Only allow for clobber.
57298     if (StringRef("{fpsr}").equals_insensitive(Constraint) && VT == MVT::Other)
57299       return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
57300 
57301     return Res;
57302   }
57303 
57304   // Make sure it isn't a register that requires 64-bit mode.
57305   if (!Subtarget.is64Bit() &&
57306       (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
57307       TRI->getEncodingValue(Res.first) >= 8) {
57308     // Register requires REX prefix, but we're in 32-bit mode.
57309     return std::make_pair(0, nullptr);
57310   }
57311 
57312   // Make sure it isn't a register that requires AVX512.
57313   if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
57314       TRI->getEncodingValue(Res.first) & 0x10) {
57315     // Register requires EVEX prefix.
57316     return std::make_pair(0, nullptr);
57317   }
57318 
57319   // Otherwise, check to see if this is a register class of the wrong value
57320   // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
57321   // turn into {ax},{dx}.
57322   // MVT::Other is used to specify clobber names.
57323   if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
57324     return Res;   // Correct type already, nothing to do.
57325 
57326   // Get a matching integer of the correct size, e.g. "ax" with MVT::i32 should
57327   // return "eax". This should even work for things like getting 64-bit integer
57328   // registers when given an f64 type.
57329   const TargetRegisterClass *Class = Res.second;
57330   // The generic code will match the first register class that contains the
57331   // given register. Thus, based on the ordering of the tablegened file,
57332   // the "plain" GR classes might not come first.
57333   // Therefore, use a helper method.
57334   if (isGRClass(*Class)) {
57335     unsigned Size = VT.getSizeInBits();
57336     if (Size == 1) Size = 8;
57337     if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
57338       return std::make_pair(0, nullptr);
57339     Register DestReg = getX86SubSuperRegister(Res.first, Size);
57340     if (DestReg.isValid()) {
57341       bool is64Bit = Subtarget.is64Bit();
57342       const TargetRegisterClass *RC =
57343           Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
57344         : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
57345         : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
57346         : /*Size == 64*/ (is64Bit ? &X86::GR64RegClass : nullptr);
57347       if (Size == 64 && !is64Bit) {
57348         // Model GCC's behavior here and select a fixed pair of 32-bit
57349         // registers.
57350         switch (DestReg) {
57351         case X86::RAX:
57352           return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57353         case X86::RDX:
57354           return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
57355         case X86::RCX:
57356           return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
57357         case X86::RBX:
57358           return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
57359         case X86::RSI:
57360           return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
57361         case X86::RDI:
57362           return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
57363         case X86::RBP:
57364           return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
57365         default:
57366           return std::make_pair(0, nullptr);
57367         }
57368       }
57369       if (RC && RC->contains(DestReg))
57370         return std::make_pair(DestReg, RC);
57371       return Res;
57372     }
57373     // No register found/type mismatch.
57374     return std::make_pair(0, nullptr);
57375   } else if (isFRClass(*Class)) {
57376     // Handle references to XMM physical registers that got mapped into the
57377     // wrong class.  This can happen with constraints like {xmm0} where the
57378     // target independent register mapper will just pick the first match it can
57379     // find, ignoring the required type.
57380 
57381     // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
57382     if (VT == MVT::f16)
57383       Res.second = &X86::FR16XRegClass;
57384     else if (VT == MVT::f32 || VT == MVT::i32)
57385       Res.second = &X86::FR32XRegClass;
57386     else if (VT == MVT::f64 || VT == MVT::i64)
57387       Res.second = &X86::FR64XRegClass;
57388     else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
57389       Res.second = &X86::VR128XRegClass;
57390     else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
57391       Res.second = &X86::VR256XRegClass;
57392     else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
57393       Res.second = &X86::VR512RegClass;
57394     else {
57395       // Type mismatch and not a clobber: return an error.
57396       Res.first = 0;
57397       Res.second = nullptr;
57398     }
57399   } else if (isVKClass(*Class)) {
57400     if (VT == MVT::i1)
57401       Res.second = &X86::VK1RegClass;
57402     else if (VT == MVT::i8)
57403       Res.second = &X86::VK8RegClass;
57404     else if (VT == MVT::i16)
57405       Res.second = &X86::VK16RegClass;
57406     else if (VT == MVT::i32)
57407       Res.second = &X86::VK32RegClass;
57408     else if (VT == MVT::i64)
57409       Res.second = &X86::VK64RegClass;
57410     else {
57411       // Type mismatch and not a clobber: return an error.
57412       Res.first = 0;
57413       Res.second = nullptr;
57414     }
57415   }
57416 
57417   return Res;
57418 }
57419 
57420 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
57421   // Integer division on x86 is expensive. However, when aggressively optimizing
57422   // for code size, we prefer to use a div instruction, as it is usually smaller
57423   // than the alternative sequence.
57424   // The exception to this is vector division. Since x86 doesn't have vector
57425   // integer division, leaving the division as-is is a loss even in terms of
57426   // size, because it will have to be scalarized, while the alternative code
57427   // sequence can be performed in vector form.
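  // Sketch of the size tradeoff: a single "idivl %ecx" is two bytes, whereas
  // the usual expansion of a division by a constant (multiply by a magic
  // constant plus shifts/fixups) takes several instructions.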
57428   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
57429   return OptSize && !VT.isVector();
57430 }
57431 
57432 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
57433   if (!Subtarget.is64Bit())
57434     return;
57435 
57436   // Update IsSplitCSR in X86MachineFunctionInfo.
57437   X86MachineFunctionInfo *AFI =
57438       Entry->getParent()->getInfo<X86MachineFunctionInfo>();
57439   AFI->setIsSplitCSR(true);
57440 }
57441 
57442 void X86TargetLowering::insertCopiesSplitCSR(
57443     MachineBasicBlock *Entry,
57444     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
57445   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
57446   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
57447   if (!IStart)
57448     return;
57449 
57450   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
57451   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
57452   MachineBasicBlock::iterator MBBI = Entry->begin();
57453   for (const MCPhysReg *I = IStart; *I; ++I) {
57454     const TargetRegisterClass *RC = nullptr;
57455     if (X86::GR64RegClass.contains(*I))
57456       RC = &X86::GR64RegClass;
57457     else
57458       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
57459 
57460     Register NewVR = MRI->createVirtualRegister(RC);
57461     // Create copy from CSR to a virtual register.
57462     // FIXME: this currently does not emit CFI pseudo-instructions; it works
57463     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
57464     // nounwind. If we want to generalize this later, we may need to emit
57465     // CFI pseudo-instructions.
57466     assert(
57467         Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
57468         "Function should be nounwind in insertCopiesSplitCSR!");
57469     Entry->addLiveIn(*I);
57470     BuildMI(*Entry, MBBI, MIMetadata(), TII->get(TargetOpcode::COPY), NewVR)
57471         .addReg(*I);
57472 
57473     // Insert the copy-back instructions right before the terminator.
57474     for (auto *Exit : Exits)
57475       BuildMI(*Exit, Exit->getFirstTerminator(), MIMetadata(),
57476               TII->get(TargetOpcode::COPY), *I)
57477           .addReg(NewVR);
57478   }
57479 }
57480 
57481 bool X86TargetLowering::supportSwiftError() const {
57482   return Subtarget.is64Bit();
57483 }
57484 
57485 MachineInstr *
57486 X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
57487                                  MachineBasicBlock::instr_iterator &MBBI,
57488                                  const TargetInstrInfo *TII) const {
57489   assert(MBBI->isCall() && MBBI->getCFIType() &&
57490          "Invalid call instruction for a KCFI check");
57491 
57492   MachineFunction &MF = *MBB.getParent();
57493   // If the call target is a memory operand, unfold it and use R11 for the
57494   // call, so KCFI_CHECK won't have to recompute the address.
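  // Roughly: "callq *8(%rdi)" is rewritten as "movq 8(%rdi), %r11" followed
  // by "callq *%r11", so the KCFI_CHECK can validate the type hash against
  // the target in %r11.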
57495   switch (MBBI->getOpcode()) {
57496   case X86::CALL64m:
57497   case X86::CALL64m_NT:
57498   case X86::TAILJMPm64:
57499   case X86::TAILJMPm64_REX: {
57500     MachineBasicBlock::instr_iterator OrigCall = MBBI;
57501     SmallVector<MachineInstr *, 2> NewMIs;
57502     if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
57503                                   /*UnfoldStore=*/false, NewMIs))
57504       report_fatal_error("Failed to unfold memory operand for a KCFI check");
57505     for (auto *NewMI : NewMIs)
57506       MBBI = MBB.insert(OrigCall, NewMI);
57507     assert(MBBI->isCall() &&
57508            "Unexpected instruction after memory operand unfolding");
57509     if (OrigCall->shouldUpdateCallSiteInfo())
57510       MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
57511     MBBI->setCFIType(MF, OrigCall->getCFIType());
57512     OrigCall->eraseFromParent();
57513     break;
57514   }
57515   default:
57516     break;
57517   }
57518 
57519   MachineOperand &Target = MBBI->getOperand(0);
57520   Register TargetReg;
57521   switch (MBBI->getOpcode()) {
57522   case X86::CALL64r:
57523   case X86::CALL64r_NT:
57524   case X86::TAILJMPr64:
57525   case X86::TAILJMPr64_REX:
57526     assert(Target.isReg() && "Unexpected target operand for an indirect call");
57527     Target.setIsRenamable(false);
57528     TargetReg = Target.getReg();
57529     break;
57530   case X86::CALL64pcrel32:
57531   case X86::TAILJMPd64:
57532     assert(Target.isSymbol() && "Unexpected target operand for a direct call");
57533     // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
57534     // 64-bit indirect thunk calls.
57535     assert(StringRef(Target.getSymbolName()).ends_with("_r11") &&
57536            "Unexpected register for an indirect thunk call");
57537     TargetReg = X86::R11;
57538     break;
57539   default:
57540     llvm_unreachable("Unexpected CFI call opcode");
57541     break;
57542   }
57543 
57544   return BuildMI(MBB, MBBI, MIMetadata(*MBBI), TII->get(X86::KCFI_CHECK))
57545       .addReg(TargetReg)
57546       .addImm(MBBI->getCFIType())
57547       .getInstr();
57548 }
57549 
57550 /// Returns true if stack probing through a function call is requested.
57551 bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
57552   return !getStackProbeSymbolName(MF).empty();
57553 }
57554 
57555 /// Returns true if stack probing through inline assembly is requested.
57556 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
57557 
57558   // No inline stack probes for Windows; they have their own mechanism.
57559   if (Subtarget.isOSWindows() ||
57560       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57561     return false;
57562 
57563   // If the function specifically requests inline stack probes, emit them.
57564   if (MF.getFunction().hasFnAttribute("probe-stack"))
57565     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
57566            "inline-asm";
57567 
57568   return false;
57569 }
57570 
57571 /// Returns the name of the symbol used to emit stack probes or the empty
57572 /// string if not applicable.
57573 StringRef
57574 X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
57575   // Inline stack probes disable the stack probe call.
57576   if (hasInlineStackProbe(MF))
57577     return "";
57578 
57579   // If the function specifically requests stack probes, emit them.
57580   if (MF.getFunction().hasFnAttribute("probe-stack"))
57581     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
57582 
57583   // Generally, if we aren't on Windows, the platform ABI does not include
57584   // support for stack probes, so don't emit them.
57585   if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
57586       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57587     return "";
57588 
57589   // We need a stack probe to conform to the Windows ABI. Choose the right
57590   // symbol.
57591   if (Subtarget.is64Bit())
57592     return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
57593   return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
57594 }
57595 
57596 unsigned
57597 X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
57598   // The default stack probe size is 4096 if the function has no
57599   // "stack-probe-size" attribute.
57600   return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
57601                                                         4096);
57602 }
57603 
57604 Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
57605   if (ML->isInnermost() &&
57606       ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
57607     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
57608   return TargetLowering::getPrefLoopAlignment();
57609 }
57610