Lines Matching defs:X86
1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
16 #include "X86.h"
135 // X86 is weird. It always uses i8 for shift amounts and setcc results.
137 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
140 // X86 instruction cache is coherent with its data cache so we can use the
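The comments at 135 and 137 sit at the top of the X86TargetLowering constructor and introduce its boolean-contents configuration. A minimal sketch of the calls they typically precede (assumed from context, since the configuration lines themselves did not match the filter):

    // Assumed shape of the lines the two comments introduce.
    setBooleanContents(ZeroOrOneBooleanContent);               // scalar setcc -> 0/1, held in i8
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // SSE compares -> all-ones/zero lanes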
196 addRegisterClass(MVT::i8, &X86::GR8RegClass);
197 addRegisterClass(MVT::i16, &X86::GR16RegClass);
198 addRegisterClass(MVT::i32, &X86::GR32RegClass);
200 addRegisterClass(MVT::i64, &X86::GR64RegClass);
258 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
273 // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
289 // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
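Each of the three comments above (258, 273, 289) marks a place where an unsupported narrow conversion is promoted to a wider one the hardware does provide. A minimal sketch of how such a promotion is registered through the usual TargetLowering API (the exact opcode/type pairs at those lines did not match the filter):

    // Hedged example: promote the i8 signed conversion to the i32 form.
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::i8, MVT::i32);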
485 // X86 wants to expand cmov itself.
639 addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
640 : &X86::FR16RegClass);
641 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
642 : &X86::FR32RegClass);
643 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
644 : &X86::FR64RegClass);
723 addRegisterClass(MVT::f32, &X86::FR32RegClass);
725 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
755 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
756 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
771 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
781 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
813 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
876 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
877 : &X86::VR128RegClass);
1039 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
1044 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1045 : &X86::VR128RegClass);
1070 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1071 : &X86::VR128RegClass);
1075 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
1076 : &X86::VR128RegClass);
1077 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1078 : &X86::VR128RegClass);
1079 addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1080 : &X86::VR128RegClass);
1081 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1082 : &X86::VR128RegClass);
1083 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1084 : &X86::VR128RegClass);
1416 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1417 : &X86::VR256RegClass);
1418 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1419 : &X86::VR256RegClass);
1420 addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1421 : &X86::VR256RegClass);
1422 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1423 : &X86::VR256RegClass);
1424 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1425 : &X86::VR256RegClass);
1426 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1427 : &X86::VR256RegClass);
1428 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1429 : &X86::VR256RegClass);
1696 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1697 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1698 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1699 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1700 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1771 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1772 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1773 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1774 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1775 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1776 addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1777 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
2132 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
2133 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
2325 addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
2326 : &X86::VR128RegClass);
2327 addRegisterClass(MVT::v16bf16, Subtarget.hasAVX512() ? &X86::VR256XRegClass
2328 : &X86::VR256RegClass);
2350 addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2412 addRegisterClass(MVT::x86amx, &X86::TILERegClass);
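In each ternary above, the "...X" classes (FR32X, VR128X, VR256X, and so on) are chosen when EVEX encodings are usable, which makes XMM16-XMM31/YMM16-YMM31 allocatable for those types; without AVX-512/VLX the legacy classes are used. A minimal sketch of the surrounding pattern (the feature guard and the trailing call are assumptions; they did not match the filter):

    if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1())
      addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
    // Once every legal type has a register class, derive register properties.
    computeRegisterProperties(Subtarget.getRegisterInfo());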
2608 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2633 return X86::createFastISel(funcInfo, libInfo);
2640 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
2659 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
2663 if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
2673 bool X86::mayFoldIntoStore(SDValue Op) {
2677 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
2768 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model CM,
2802 case X86::COND_E:
2803 case X86::COND_NE:
2804 case X86::COND_B:
2805 case X86::COND_A:
2806 case X86::COND_BE:
2807 case X86::COND_AE:
2809 case X86::COND_G:
2810 case X86::COND_GE:
2811 case X86::COND_L:
2812 case X86::COND_LE:
2817 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
2821 case ISD::SETEQ: return X86::COND_E;
2822 case ISD::SETGT: return X86::COND_G;
2823 case ISD::SETGE: return X86::COND_GE;
2824 case ISD::SETLT: return X86::COND_L;
2825 case ISD::SETLE: return X86::COND_LE;
2826 case ISD::SETNE: return X86::COND_NE;
2827 case ISD::SETULT: return X86::COND_B;
2828 case ISD::SETUGT: return X86::COND_A;
2829 case ISD::SETULE: return X86::COND_BE;
2830 case ISD::SETUGE: return X86::COND_AE;
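A minimal sketch of how this mapping is typically consumed when lowering an integer setcc (it mirrors the helpers matched at 22021 and 23939, but is illustrative rather than the exact code path):

    // Hedged sketch: translate the ISD predicate, emit the compare, then a
    // SETCC node that reads the resulting EFLAGS value.
    X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
    SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
    SDValue Res = getSETCC(CC, Cmp, dl, DAG);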
2835 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
2838 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
2846 return X86::COND_NS;
2850 return X86::COND_S;
2854 return X86::COND_NS;
2859 return X86::COND_LE;
2895 case ISD::SETEQ: return X86::COND_E;
2898 case ISD::SETGT: return X86::COND_A;
2901 case ISD::SETGE: return X86::COND_AE;
2904 case ISD::SETLT: return X86::COND_B;
2907 case ISD::SETLE: return X86::COND_BE;
2909 case ISD::SETNE: return X86::COND_NE;
2910 case ISD::SETUO: return X86::COND_P;
2911 case ISD::SETO: return X86::COND_NP;
2913 case ISD::SETUNE: return X86::COND_INVALID;
2918 /// Is there a floating point cmov for the specific X86 condition code?
2925 case X86::COND_B:
2926 case X86::COND_BE:
2927 case X86::COND_E:
2928 case X86::COND_P:
2929 case X86::COND_A:
2930 case X86::COND_AE:
2931 case X86::COND_NE:
2932 case X86::COND_NP:
3779 bool X86::isZeroNode(SDValue Elt) {
5062 namespace X86 {
5087 } // namespace X86
5591 if (X86::isZeroNode(Op))
5616 AllZero &= X86::isZeroNode(Op);
5699 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
6018 if (X86::isZeroNode(Scl)) {
6663 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
6923 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
7158 !X86::mayFoldLoadIntoBroadcastFromMem(
8090 // There are no known X86 targets with 512-bit ADDSUB instructions!
8817 if (X86::isZeroNode(Elt)) {
8996 X86::isZeroNode(Op.getOperand(0)) &&
8997 !X86::isZeroNode(Op.getOperand(1))) {
9389 /// used by the X86 shuffle instructions (not a fully general
9404 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9893 // X86 has dedicated shuffle that can be lowered to VEXPAND
10004 // X86 has dedicated unpack instructions that can handle specific blend
10358 // X86 has dedicated pack instructions that can handle specific truncation
11153 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11190 !X86::mayFoldLoad(Input, Subtarget)))
14950 X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
17903 if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
17904 !X86::mayFoldIntoStore(Op))
18092 if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
18093 !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
18258 bool IsZeroElt = X86::isZeroNode(N1);
18314 X86::mayFoldLoad(N1, Subtarget)))) {
18387 if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
18420 if (X86::isZeroNode(Op.getOperand(0)))
18620 X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
18720 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18725 return GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX, X86II::MO_TLSGD);
18733 X86::RAX, X86II::MO_TLSGD);
18741 X86::EAX, X86II::MO_TLSGD);
18756 unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
18761 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18764 Base = GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX,
18913 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
21283 // but making use of X86 specifics to produce better instruction sequences.
21612 SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
21806 /// This mode isn't supported in hardware on X86. But as long as we aren't
22021 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
22198 X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
22291 SelectionDAG &DAG, X86::CondCode &X86CC) {
22308 X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
22440 X86::CondCode &X86CC) {
22604 case X86::COND_A: case X86::COND_AE:
22605 case X86::COND_B: case X86::COND_BE:
22608 case X86::COND_G: case X86::COND_GE:
22609 case X86::COND_L: case X86::COND_LE:
22610 case X86::COND_O: case X86::COND_NO: {
22722 !X86::mayFoldLoad(Op0, Subtarget) && !X86::mayFoldLoad(Op1, Subtarget) &&
22731 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
22763 Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22772 Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22973 SelectionDAG &DAG, X86::CondCode &X86CC) {
23032 X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
23714 X86::CondCode X86Cond;
23716 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
23719 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
23751 /// corresponding X86 condition code constant in X86CC.
23758 X86::CondCode X86CondCode;
23792 X86CondCode = (X86::CondCode)Op0.getConstantOperandVal(0);
23793 X86CondCode = X86::GetOppositeBranchCondition(X86CondCode);
23807 X86::CondCode CondCode = CC == ISD::SETEQ ? X86::COND_O : X86::COND_NO;
23825 X86CondCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
23832 X86::CondCode CondCode =
23834 assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
23911 X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
23912 if (CondCode == X86::COND_INVALID)
23939 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
23956 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
23967 Cond = X86::COND_O;
23971 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
23975 Cond = X86::COND_O;
23979 Cond = X86::COND_B;
23983 Cond = X86::COND_O;
23987 Cond = X86::COND_O;
24007 X86::CondCode Cond;
24016 /// Return true if opcode is an X86 logical comparison.
24158 ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
24159 (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
24162 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
24174 if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
24182 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
24185 } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
24222 ((CondCode == X86::COND_S) || // smin(x, 0)
24223 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
24233 if (CondCode == X86::COND_G)
24267 X86::CondCode X86Cond;
24282 X86::CondCode X86CondCode;
24292 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
24293 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
24303 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
24308 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
24309 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
24315 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
24337 (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
24338 !X86::mayFoldLoad(Op2, Subtarget))) {
24822 X86::CondCode X86Cond;
24826 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
24861 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24864 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24874 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24877 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24881 X86::CondCode X86Cond =
24892 X86::CondCode X86Cond;
25137 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
25336 if (X86::isZeroNode(Mask))
25480 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
25487 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25489 RC ^= X86::STATIC_ROUNDING::NO_EXC;
25492 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
25501 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25503 RC ^= X86::STATIC_ROUNDING::NO_EXC;
25504 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
25505 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
25506 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
25507 RC == X86::STATIC_ROUNDING::TO_ZERO;
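The fragments at 25480-25507 come from two separate rounding-control predicates: one accepts the "current direction" encoding (optionally with the SAE bit), the other requires the SAE bit plus one of the four explicit modes. A sketch of the second check, with a hypothetical name and a plain unsigned parameter instead of the file's SDValue plumbing:

    // Hedged sketch, not a function from the file.
    bool isExplicitRoundingWithSAE(unsigned RC) {
      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
        RC ^= X86::STATIC_ROUNDING::NO_EXC; // strip suppress-all-exceptions
        return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
               RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
               RC == X86::STATIC_ROUNDING::TO_POS_INF ||
               RC == X86::STATIC_ROUNDING::TO_ZERO;
      }
      return false;
    }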
25946 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
25947 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
25952 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
25953 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
25959 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
25964 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
26095 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
26196 X86::CondCode X86CC;
26205 X86CC = X86::COND_B;
26212 X86CC = X86::COND_E;
26223 X86CC = X86::COND_E;
26234 X86CC = X86::COND_B;
26245 X86CC = X86::COND_A;
26267 X86::CondCode X86CC;
26272 X86CC = X86::COND_A;
26276 X86CC = X86::COND_A;
26280 X86CC = X86::COND_B;
26284 X86CC = X86::COND_B;
26288 X86CC = X86::COND_O;
26292 X86CC = X86::COND_O;
26296 X86CC = X86::COND_S;
26300 X86CC = X86::COND_S;
26304 X86CC = X86::COND_E;
26308 X86CC = X86::COND_E;
26412 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
26414 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
26650 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
26651 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
26654 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
26655 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
26691 if (Opcode != X86::RDTSCP)
26697 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
26706 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
26770 bool X86::isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
26790 if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF)) {
26794 SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
26796 SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
26882 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26903 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
26938 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
26979 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
26993 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
27013 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
27033 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
27101 X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
27147 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
27219 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
27228 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
27342 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
27343 (FrameReg == X86::EBP && VT == MVT::i32)) &&
27359 .Case("esp", X86::ESP)
27360 .Case("rsp", X86::RSP)
27361 .Case("ebp", X86::EBP)
27362 .Case("rbp", X86::RBP)
27363 .Case("r14", X86::R14)
27364 .Case("r15", X86::R15)
27367 if (Reg == X86::EBP || Reg == X86::RBP) {
27375 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
27396 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27398 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
27405 return X86::NoRegister;
27406 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27422 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
27423 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
27426 Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
27491 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
27493 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
27494 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
27549 NestReg = X86::ECX;
27581 NestReg = X86::EAX;
27593 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
27718 case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
27719 case RoundingMode::TowardNegative: FieldVal = X86::rmDownward; break;
27720 case RoundingMode::TowardPositive: FieldVal = X86::rmUpward; break;
27721 case RoundingMode::TowardZero: FieldVal = X86::rmTowardZero; break;
27723 llvm_unreachable("rounding mode is not supported by X86 hardware");
27736 // (0xc9 << 4) & 0xc00 = X86::rmTowardZero
27737 // (0xc9 << 6) & 0xc00 = X86::rmToNearest
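The two comments at 27736-27737 depend on bit arithmetic over the x87 control-word rounding field (bits 10-11, mask 0xc00). A standalone check of that arithmetic, as a verification sketch only:

    #include <cassert>

    int main() {
      // Both rounding-control bits set -> 11b -> round toward zero.
      assert(((0xc9 << 4) & 0xc00) == 0xc00);
      // Both rounding-control bits clear -> 00b -> round to nearest.
      assert(((0xc9 << 6) & 0xc00) == 0x000);
      return 0;
    }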
28079 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
28113 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
28211 // Since X86 does not have CMOV for 8-bit integer, we don't convert
28216 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
29233 if (!X86::isConstantSplat(Amt, APIntShiftAmt))
29963 bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
30165 bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
30820 X86::CondCode CC = X86::COND_INVALID;
30826 CC = X86::COND_E;
30829 CC = X86::COND_NE;
30832 CC = X86::COND_S;
30835 CC = X86::COND_NS;
31023 DAG.getRegister(X86::RSP, MVT::i64), // Base
31030 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
31037 DAG.getRegister(X86::ESP, MVT::i32), // Base
31045 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
31081 case MVT::i8: Reg = X86::AL; size = 1; break;
31082 case MVT::i16: Reg = X86::AX; size = 2; break;
31083 case MVT::i32: Reg = X86::EAX; size = 4; break;
31086 Reg = X86::RAX; size = 8;
31103 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
31105 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
31595 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31633 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31706 // On X86, the only ordering which actually requires an instruction is
31844 SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
32218 return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
32326 auto COND_NE = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
32344 auto COND_NE = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
33342 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
33345 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
33348 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
33352 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
33356 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
33362 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
33375 Regs64bit ? X86::RAX : X86::EAX, cpInL, SDValue());
33377 DAG.getCopyToReg(cpInL.getValue(0), dl, Regs64bit ? X86::RDX : X86::EDX,
33383 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
33401 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
33410 Regs64bit ? X86::RAX : X86::EAX,
33413 Regs64bit ? X86::RDX : X86::EDX,
33417 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
33419 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
34078 // X86 supports extremely general addressing modes.
34081 // X86 allows a sign-extended 32-bit immediate field as a displacement.
34082 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
34256 // X86 has 8, 16, and 32-bit zero-extending loads.
34434 // X86 Scheduler Hooks
34443 if (mi.readsRegister(X86::EFLAGS, /*TRI=*/nullptr))
34446 if (mi.definesRegister(X86::EFLAGS, /*TRI=*/nullptr))
34453 if (Succ->isLiveIn(X86::EFLAGS))
34492 mainMBB->addLiveIn(X86::EFLAGS);
34493 fallMBB->addLiveIn(X86::EFLAGS);
34494 sinkMBB->addLiveIn(X86::EFLAGS);
34512 BuildMI(thisMBB, MIMD, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
34518 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
34519 BuildMI(mainMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
34526 BuildMI(fallMBB, MIMD, TII->get(X86::XABORT_DEF));
34528 .addReg(X86::EAX);
34533 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
34544 // Emit va_arg instruction on X86-64.
34555 static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
34670 BuildMI(thisMBB, MIMD, TII->get(X86::MOV32rm), OffsetReg)
34679 BuildMI(thisMBB, MIMD, TII->get(X86::CMP32ri))
34685 BuildMI(thisMBB, MIMD, TII->get(X86::JCC_1))
34686 .addMBB(overflowMBB).addImm(X86::COND_AE);
34697 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34709 BuildMI(offsetMBB, MIMD, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
34712 .addImm(X86::sub_32bit);
34715 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD64rr), OffsetDestReg)
34720 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32rr), OffsetDestReg)
34727 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32ri), NextOffsetReg)
34732 BuildMI(offsetMBB, MIMD, TII->get(X86::MOV32mr))
34742 BuildMI(offsetMBB, MIMD, TII->get(X86::JMP_1))
34753 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34771 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34778 TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
34792 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34799 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
34811 TII->get(X86::PHI), DestReg)
34835 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
34844 case X86::CMOV_FR16:
34845 case X86::CMOV_FR16X:
34846 case X86::CMOV_FR32:
34847 case X86::CMOV_FR32X:
34848 case X86::CMOV_FR64:
34849 case X86::CMOV_FR64X:
34850 case X86::CMOV_GR8:
34851 case X86::CMOV_GR16:
34852 case X86::CMOV_GR32:
34853 case X86::CMOV_RFP32:
34854 case X86::CMOV_RFP64:
34855 case X86::CMOV_RFP80:
34856 case X86::CMOV_VR64:
34857 case X86::CMOV_VR128:
34858 case X86::CMOV_VR128X:
34859 case X86::CMOV_VR256:
34860 case X86::CMOV_VR256X:
34861 case X86::CMOV_VR512:
34862 case X86::CMOV_VK1:
34863 case X86::CMOV_VK2:
34864 case X86::CMOV_VK4:
34865 case X86::CMOV_VK8:
34866 case X86::CMOV_VK16:
34867 case X86::CMOV_VK32:
34868 case X86::CMOV_VK64:
34889 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
34890 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34921 BuildMI(*SinkMBB, SinkInsertionPoint, MIMD, TII->get(X86::PHI), DestReg)
35027 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
35032 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS, /*TRI=*/nullptr) &&
35034 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
35035 SinkMBB->addLiveIn(X86::EFLAGS);
35056 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
35057 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
35059 X86::CondCode SecondCC =
35060 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
35061 BuildMI(FirstInsertedMBB, MIMD, TII->get(X86::JCC_1))
35071 BuildMI(*SinkMBB, SinkMBB->begin(), MIMD, TII->get(X86::PHI), DestReg)
35141 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
35142 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
35188 if (!LastCMOV->killsRegister(X86::EFLAGS, /*TRI=*/nullptr) &&
35190 FalseMBB->addLiveIn(X86::EFLAGS);
35191 SinkMBB->addLiveIn(X86::EFLAGS);
35215 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
35233 return X86::SUB64ri32;
35235 return X86::SUB32ri;
35261 Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
35264 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
35266 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
35271 const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
35280 TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
35284 BuildMI(testMBB, MIMD, TII->get(X86::JCC_1))
35286 .addImm(X86::COND_GE);
35303 TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
35312 BuildMI(blockMBB, MIMD, TII->get(X86::JMP_1)).addMBB(testMBB);
35345 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
35378 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
35393 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
35395 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
35398 BuildMI(BB, MIMD, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
35406 BuildMI(bumpMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
35412 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV64rr), X86::RDI)
35414 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
35417 .addReg(X86::RDI, RegState::Implicit)
35418 .addReg(X86::RAX, RegState::ImplicitDefine);
35420 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV32rr), X86::EDI)
35422 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
35425 .addReg(X86::EDI, RegState::Implicit)
35426 .addReg(X86::EAX, RegState::ImplicitDefine);
35428 BuildMI(mallocMBB, MIMD, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
35430 BuildMI(mallocMBB, MIMD, TII->get(X86::PUSH32r)).addReg(sizeVReg);
35431 BuildMI(mallocMBB, MIMD, TII->get(X86::CALLpcrel32))
35434 .addReg(X86::EAX, RegState::ImplicitDefine);
35438 BuildMI(mallocMBB, MIMD, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
35442 .addReg(IsLP64 ? X86::RAX : X86::EAX);
35443 BuildMI(mallocMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
35452 BuildMI(*continueMBB, continueMBB->begin(), MIMD, TII->get(X86::PHI),
35497 BuildMI(*RestoreMBB, RestoreMBBI, MIMD, TII.get(X86::JMP_4)).addMBB(TargetMBB);
35554 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV64rm), X86::RDI)
35555 .addReg(X86::RIP)
35561 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL64m));
35562 addDirectMem(MIB, X86::RDI);
35563 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
35566 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35573 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35574 addDirectMem(MIB, X86::EAX);
35575 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35578 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35585 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35586 addDirectMem(MIB, X86::EAX);
35587 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35596 case X86::INDIRECT_THUNK_CALL32:
35597 return X86::CALLpcrel32;
35598 case X86::INDIRECT_THUNK_CALL64:
35599 return X86::CALL64pcrel32;
35600 case X86::INDIRECT_THUNK_TCRETURN32:
35601 return X86::TCRETURNdi;
35602 case X86::INDIRECT_THUNK_TCRETURN64:
35603 return X86::TCRETURNdi64;
35625 case X86::EAX:
35628 case X86::ECX:
35631 case X86::EDX:
35634 case X86::EDI:
35637 case X86::R11:
35648 case X86::EAX:
35651 case X86::ECX:
35654 case X86::EDX:
35657 case X86::EDI:
35660 case X86::R11:
35692 AvailableRegs.push_back(X86::R11);
35694 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
35755 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
35763 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35767 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35771 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35772 if (i == X86::AddrDisp)
35856 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35860 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA64r), LabelReg)
35861 .addReg(X86::RIP)
35868 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA32r), LabelReg)
35876 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35879 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35880 if (i == X86::AddrDisp)
35896 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::EH_SjLj_Setup))
35906 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32r0), mainDstReg);
35910 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
35924 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
35929 BuildMI(restoreMBB, MIMD, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
35930 BuildMI(restoreMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
36005 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
36006 BuildMI(checkSspMBB, MIMD, TII->get(X86::MOV32r0), ZReg);
36010 BuildMI(checkSspMBB, MIMD, TII->get(X86::SUBREG_TO_REG), TmpZReg)
36013 .addImm(X86::sub_32bit);
36019 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
36024 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
36028 BuildMI(checkSspMBB, MIMD, TII->get(X86::JCC_1))
36030 .addImm(X86::COND_E);
36036 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36040 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36042 if (i == X86::AddrDisp)
36054 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
36060 BuildMI(fallMBB, MIMD, TII->get(X86::JCC_1))
36062 .addImm(X86::COND_BE);
36067 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
36075 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
36085 BuildMI(fixShadowMBB, MIMD, TII->get(X86::JCC_1))
36087 .addImm(X86::COND_E);
36092 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64ri : X86::SHL32ri;
36100 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
36109 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::PHI), CounterReg)
36119 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
36123 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::JCC_1))
36125 .addImm(X86::COND_NE);
36149 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36153 Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
36161 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36162 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
36173 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36185 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36187 if (i == X86::AddrDisp)
36199 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36200 if (i == X86::AddrDisp)
36234 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
36237 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36239 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36242 BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA64r), VR)
36243 .addReg(X86::RIP)
36249 BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA32r), VR)
36323 BuildMI(TrapBB, MIMD, TII->get(X86::TRAP));
36354 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
36359 BuildMI(DispatchBB, MIMD, TII->get(X86::NOOP))
36364 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
36365 addFrameReference(BuildMI(DispatchBB, MIMD, TII->get(X86::MOV32rm), IReg), FI,
36367 BuildMI(DispatchBB, MIMD, TII->get(X86::CMP32ri))
36370 BuildMI(DispatchBB, MIMD, TII->get(X86::JCC_1))
36372 .addImm(X86::COND_AE);
36375 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
36376 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
36379 BuildMI(DispContBB, MIMD, TII->get(X86::LEA64r), BReg)
36380 .addReg(X86::RIP)
36389 .addImm(X86::sub_32bit);
36394 BuildMI(DispContBB, MIMD, TII->get(X86::JMP64m))
36402 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
36403 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
36404 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
36407 BuildMI(DispContBB, MIMD, TII->get(X86::MOV32rm), OReg)
36414 BuildMI(DispContBB, MIMD, TII->get(X86::MOVSX64rr32), OReg64)
36417 BuildMI(DispContBB, MIMD, TII->get(X86::ADD64rr), TReg)
36421 BuildMI(DispContBB, MIMD, TII->get(X86::JMP64r)).addReg(TReg);
36429 BuildMI(DispContBB, MIMD, TII->get(X86::JMP32m))
36530 return X86::TMM0 + Imm;
36534 case X86::TLS_addr32:
36535 case X86::TLS_addr64:
36536 case X86::TLS_addrX32:
36537 case X86::TLS_base_addr32:
36538 case X86::TLS_base_addr64:
36539 case X86::TLS_base_addrX32:
36540 case X86::TLS_desc32:
36541 case X86::TLS_desc64:
36543 case X86::INDIRECT_THUNK_CALL32:
36544 case X86::INDIRECT_THUNK_CALL64:
36545 case X86::INDIRECT_THUNK_TCRETURN32:
36546 case X86::INDIRECT_THUNK_TCRETURN64:
36548 case X86::CATCHRET:
36550 case X86::SEG_ALLOCA_32:
36551 case X86::SEG_ALLOCA_64:
36553 case X86::PROBED_ALLOCA_32:
36554 case X86::PROBED_ALLOCA_64:
36556 case X86::TLSCall_32:
36557 case X86::TLSCall_64:
36559 case X86::CMOV_FR16:
36560 case X86::CMOV_FR16X:
36561 case X86::CMOV_FR32:
36562 case X86::CMOV_FR32X:
36563 case X86::CMOV_FR64:
36564 case X86::CMOV_FR64X:
36565 case X86::CMOV_GR8:
36566 case X86::CMOV_GR16:
36567 case X86::CMOV_GR32:
36568 case X86::CMOV_RFP32:
36569 case X86::CMOV_RFP64:
36570 case X86::CMOV_RFP80:
36571 case X86::CMOV_VR64:
36572 case X86::CMOV_VR128:
36573 case X86::CMOV_VR128X:
36574 case X86::CMOV_VR256:
36575 case X86::CMOV_VR256X:
36576 case X86::CMOV_VR512:
36577 case X86::CMOV_VK1:
36578 case X86::CMOV_VK2:
36579 case X86::CMOV_VK4:
36580 case X86::CMOV_VK8:
36581 case X86::CMOV_VK16:
36582 case X86::CMOV_VK32:
36583 case X86::CMOV_VK64:
36586 case X86::FP80_ADDr:
36587 case X86::FP80_ADDm32: {
36592 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36596 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36597 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36602 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36603 BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36609 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36611 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36616 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36621 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36625 if (MI.getOpcode() == X86::FP80_ADDr) {
36626 BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80))
36631 BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80m32))
36642 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36649 case X86::FP32_TO_INT16_IN_MEM:
36650 case X86::FP32_TO_INT32_IN_MEM:
36651 case X86::FP32_TO_INT64_IN_MEM:
36652 case X86::FP64_TO_INT16_IN_MEM:
36653 case X86::FP64_TO_INT32_IN_MEM:
36654 case X86::FP64_TO_INT64_IN_MEM:
36655 case X86::FP80_TO_INT16_IN_MEM:
36656 case X86::FP80_TO_INT32_IN_MEM:
36657 case X86::FP80_TO_INT64_IN_MEM: {
36662 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36666 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36667 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36671 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36672 BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36677 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36679 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36684 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36690 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
36692 // Get the X86 opcode to use.
36697 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
36698 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
36699 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
36700 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
36701 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
36702 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
36703 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
36704 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
36705 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
36711 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
36714 addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36722 case X86::XBEGIN:
36725 case X86::VAARG_64:
36726 case X86::VAARG_X32:
36729 case X86::EH_SjLj_SetJmp32:
36730 case X86::EH_SjLj_SetJmp64:
36733 case X86::EH_SjLj_LongJmp32:
36734 case X86::EH_SjLj_LongJmp64:
36737 case X86::Int_eh_sjlj_setup_dispatch:
36753 case X86::LCMPXCHG8B: {
36773 assert(TRI->getBaseRegister() == X86::ESI &&
36774 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
36785 if (AM.IndexReg == X86::NoRegister)
36793 (RMBBI->definesRegister(X86::EAX, /*TRI=*/nullptr) ||
36794 RMBBI->definesRegister(X86::EBX, /*TRI=*/nullptr) ||
36795 RMBBI->definesRegister(X86::ECX, /*TRI=*/nullptr) ||
36796 RMBBI->definesRegister(X86::EDX, /*TRI=*/nullptr))) {
36801 BuildMI(*BB, *MBBI, MIMD, TII->get(X86::LEA32r), computedAddrVReg), AM);
36807 case X86::LCMPXCHG16B_NO_RBX: {
36811 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
36816 MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36818 .addReg(X86::RBX);
36819 Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36821 BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
36822 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36824 MIB.add(MI.getOperand(X86::AddrNumOperands));
36828 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::RBX)
36829 .add(MI.getOperand(X86::AddrNumOperands));
36831 BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B));
36832 for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36838 case X86::MWAITX: {
36841 bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
36845 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36847 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36849 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EBX)
36851 BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITXrrr));
36858 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36860 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36865 MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36867 .addReg(X86::RBX);
36869 Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36870 BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITX_SAVE_RBX))
36887 BuildMI(*BB, MI, MIMD, TII->get(X86::SUB32ri), X86::ESP)
36888 .addReg(X86::ESP)
36902 addRegOffset(BuildMI(*BB, MI, MIMD, TII->get(X86::LEA32r),
36904 X86::ESP, false, ArgOffset);
36908 case X86::PTDPBSSD:
36909 case X86::PTDPBSUD:
36910 case X86::PTDPBUSD:
36911 case X86::PTDPBUUD:
36912 case X86::PTDPBF16PS:
36913 case X86::PTDPFP16PS: {
36918 case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
36919 case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
36920 case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
36921 case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
36922 case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
36923 case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
36936 case X86::PTILEZERO: {
36938 BuildMI(*BB, MI, MIMD, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
36944 case X86::PTILEZEROV: {
36949 case X86::PTILELOADD:
36950 case X86::PTILELOADDT1:
36951 case X86::PTILESTORED: {
36956 case X86::PTILELOADD:
36957 Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
36959 case X86::PTILELOADDT1:
36960 Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDT1);
36962 case X86::PTILESTORED:
36963 Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
36970 if (Opc != X86::TILESTORED && Opc != X86::TILESTORED_EVEX)
36980 if (Opc == X86::TILESTORED || Opc == X86::TILESTORED_EVEX)
36987 case X86::PTCMMIMFP16PS:
36988 case X86::PTCMMRLFP16PS: {
36994 case X86::PTCMMIMFP16PS: Opc = X86::TCMMIMFP16PS; break;
36995 case X86::PTCMMRLFP16PS: Opc = X86::TCMMRLFP16PS; break;
37010 // X86 Optimization Hooks
38787 X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
39663 namespace X86 {
39667 } // namespace X86
40140 {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
40336 if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
40337 X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
40740 X86::MaxShuffleCombineDepth,
41661 // X86 targets with 512-bit ADDSUB instructions!
41667 // X86 targets with FP16 ADDSUB instructions!
42630 assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
42638 {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
44396 X86::CondCode X86CC;
46411 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
46469 if (CC == X86::COND_A && !Comparison.isMaxValue()) {
46471 CC = X86::COND_AE;
46472 } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
46474 CC = X86::COND_L;
46479 if (CC == X86::COND_AE && !Comparison.isMinValue()) {
46481 CC = X86::COND_A;
46482 } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
46484 CC = X86::COND_LE;
46511 if (CC == X86::COND_S && Addend == 1)
46512 CC = X86::COND_LE;
46513 else if (CC == X86::COND_NS && Addend == 1)
46514 CC = X86::COND_G;
46515 else if (CC == X86::COND_G && Addend == -1)
46516 CC = X86::COND_GE;
46517 else if (CC == X86::COND_LE && Addend == -1)
46518 CC = X86::COND_L;
46530 static SDValue checkSignTestSetCCCombine(SDValue Cmp, X86::CondCode &CC,
46532 if (CC != X86::COND_S && CC != X86::COND_NS)
46579 CC = CC == X86::COND_S ? X86::COND_NE : X86::COND_E;
46599 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
46606 if (CC != X86::COND_E && CC != X86::COND_NE)
46616 bool needOppositeCond = (CC == X86::COND_E);
46660 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
46665 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
46667 CC = X86::GetOppositeBranchCondition(CC);
46703 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
46705 CC = X86::GetOppositeBranchCondition(CC);
46717 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
46718 X86::CondCode &CC1, SDValue &Flags,
46749 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
46750 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
46775 if (CarryCC == X86::COND_B)
46777 if (CarryCC == X86::COND_A) {
46796 if (CarryCC == X86::COND_E &&
46817 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
46837 X86::CondCode InvCC;
46839 case X86::COND_B:
46841 InvCC = X86::COND_E;
46843 case X86::COND_AE:
46845 InvCC = X86::COND_NE;
46847 case X86::COND_E:
46849 InvCC = X86::COND_B;
46851 case X86::COND_NE:
46853 InvCC = X86::COND_AE;
46855 case X86::COND_A:
46856 case X86::COND_BE:
46861 InvCC = X86::COND_INVALID;
46865 if (InvCC != X86::COND_INVALID) {
46872 if (CC == X86::COND_B || CC == X86::COND_AE) {
46885 if (CC == X86::COND_E || CC == X86::COND_NE) {
46888 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46906 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46959 if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
46981 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
46986 if (!(CC == X86::COND_E || CC == X86::COND_NE))
47204 CC = IsAnyOf ? CC : (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
47216 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
47219 if (CC == X86::COND_B)
47238 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
47246 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
47254 // We can't always do this as FCMOV only supports a subset of X86 cond.
47274 CC = X86::GetOppositeBranchCondition(CC);
47373 if (CC == X86::COND_NE &&
47375 CC = X86::GetOppositeBranchCondition(CC);
47379 if (CC == X86::COND_E &&
47395 if (CC == X86::COND_AE && isOneConstant(FalseOp) &&
47431 if (CC == X86::COND_NE) {
47433 X86::CondCode CC0, CC1;
47438 CC0 = X86::GetOppositeBranchCondition(CC0);
47439 CC1 = X86::GetOppositeBranchCondition(CC1);
47456 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
47461 if (CC == X86::COND_E)
47480 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
48047 // TODO: This is X86 specific because we want to be able to handle wide types
48197 // sexts in X86 are MOVs. The MOVs have the same code size
48927 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
48928 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
48930 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
48931 X86::CondCode tmp = cc0;
48936 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
48937 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
48940 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
49427 if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
49473 // This function recognizes cases where X86 bzhi instruction can replace and
49737 if (static_cast<X86::CondCode>(BrCond->getConstantOperandVal(CondNo)) !=
49738 X86::COND_NE)
49748 X86::CondCode CC =
49749 static_cast<X86::CondCode>(CCN->getAsAPIntVal().getSExtValue());
49750 X86::CondCode OppositeCC = X86::GetOppositeBranchCondition(CC);
49813 X86::CondCode CC0 =
49814 static_cast<X86::CondCode>(SetCC0.getConstantOperandVal(0));
49816 if (CC0 == X86::COND_P || CC0 == X86::COND_NP)
49825 IsOR ? DAG.getTargetConstant(X86::GetOppositeBranchCondition(CC0),
49829 X86::CondCode CC1 =
49830 static_cast<X86::CondCode>(CC1N->getAsAPIntVal().getSExtValue());
49831 X86::CondCode OppositeCC1 = X86::GetOppositeBranchCondition(CC1);
49832 X86::CondCode CFlagsCC = IsOR ? CC1 : OppositeCC1;
49835 X86::getCCMPCondFlagsFromCondCode(CFlagsCC), DL, MVT::i8);
49993 X86::CondCode X86CC = X86::COND_B;
49997 X86CC = X86::COND_AE;
50007 X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
50102 X86::MaxShuffleCombineDepth,
50282 // The result of the shift is true or false, and on X86, the 32-bit
50317 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
50440 X86::CondCode CC;
50443 CC = (X86::CondCode)Y.getConstantOperandVal(0);
50457 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
50458 (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
50463 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
50467 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
50468 (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
50480 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
50486 if (CC == X86::COND_B) {
50497 if (CC == X86::COND_A) {
50517 if (CC == X86::COND_AE) {
50525 if (CC == X86::COND_BE) {
50547 if (CC != X86::COND_E && CC != X86::COND_NE)
50551 !X86::isZeroNode(EFLAGS.getOperand(1)) ||
50565 if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
50566 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
50571 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
50579 if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
50580 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
50585 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
50600 if (CC == X86::COND_NE)
50761 X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
50762 CCode = X86::GetOppositeBranchCondition(CCode);
52367 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - its
52393 // TODO: This is X86 specific because we want to be able to handle wide types
52979 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
52980 X86::CondCode(LHS->getConstantOperandVal(0)));
54220 X86::CondCode X86CC;
54781 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
54796 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
55090 X86::CondCode CC;
55097 CC = (X86::CondCode)User->getConstantOperandVal(0);
55101 CC = (X86::CondCode)User->getConstantOperandVal(2);
55108 case X86::COND_A: case X86::COND_AE:
55109 case X86::COND_B: case X86::COND_BE:
55110 case X86::COND_O: case X86::COND_NO:
55111 case X86::COND_G: case X86::COND_GE:
55112 case X86::COND_L: case X86::COND_LE:
55140 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
55141 if (CC != X86::COND_E && CC != X86::COND_NE)
55282 // Use an X86-specific opcode to avoid DAG combine messing with it.
55386 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
55739 if (X86::mayFoldLoad(OtherOp, Subtarget))
55835 X86::isZeroNode(Op0.getOperand(1))) {
55855 X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
55856 if (CC != X86::COND_S && CC != X86::COND_NS)
55899 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
55900 X86::CondCode NewCC = X86::GetOppositeBranchCondition(CC);
55917 if (N->getConstantOperandVal(3) != X86::COND_NE)
55926 if (!X86::isZeroNode(Sub.getOperand(0)) || SetCC.getOpcode() != X86ISD::SETCC)
55955 // X86 can't encode an immediate LHS of a sub. See if we can push the
55980 X86::isZeroNode(Op1.getOperand(1))) {
55989 !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
56106 X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
56117 X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
56138 !X86::mayFoldLoad(Op0.getOperand(0), Subtarget))
58090 if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
58105 if (X86::mayFoldLoad(N1, Subtarget) &&
58109 if (X86::mayFoldLoad(N0, Subtarget) &&
58124 // X86 Inline Assembly Support
58238 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
58239 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
58240 .Case("{@cca}", X86::COND_A)
58241 .Case("{@ccae}", X86::COND_AE)
58242 .Case("{@ccb}", X86::COND_B)
58243 .Case("{@ccbe}", X86::COND_BE)
58244 .Case("{@ccc}", X86::COND_B)
58245 .Case("{@cce}", X86::COND_E)
58246 .Case("{@ccz}", X86::COND_E)
58247 .Case("{@ccg}", X86::COND_G)
58248 .Case("{@ccge}", X86::COND_GE)
58249 .Case("{@ccl}", X86::COND_L)
58250 .Case("{@ccle}", X86::COND_LE)
58251 .Case("{@ccna}", X86::COND_BE)
58252 .Case("{@ccnae}", X86::COND_B)
58253 .Case("{@ccnb}", X86::COND_AE)
58254 .Case("{@ccnbe}", X86::COND_A)
58255 .Case("{@ccnc}", X86::COND_AE)
58256 .Case("{@ccne}", X86::COND_NE)
58257 .Case("{@ccnz}", X86::COND_NE)
58258 .Case("{@ccng}", X86::COND_LE)
58259 .Case("{@ccnge}", X86::COND_L)
58260 .Case("{@ccnl}", X86::COND_GE)
58261 .Case("{@ccnle}", X86::COND_G)
58262 .Case("{@ccno}", X86::COND_NO)
58263 .Case("{@ccnp}", X86::COND_NP)
58264 .Case("{@ccns}", X86::COND_NS)
58265 .Case("{@cco}", X86::COND_O)
58266 .Case("{@ccp}", X86::COND_P)
58267 .Case("{@ccs}", X86::COND_S)
58268 .Default(X86::COND_INVALID);
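This table parses the asm flag-output constraints ("=@cc<cond>") accepted on x86; each string is mapped to the EFLAGS condition whose value the inline asm hands back (the copy from X86::EFLAGS appears at 58528-58531). A small usage example of the constraint form being parsed (illustrative, not taken from this file):

    // "=@ccb" asks the compiler for the 'below' (CF) condition after the asm,
    // which the backend resolves through parseConstraintCode -> X86::COND_B.
    bool isBelow(unsigned long a, unsigned long b) {
      bool below;
      asm("cmp %2, %1" : "=@ccb"(below) : "r"(a), "r"(b));
      return below; // true when a < b (unsigned)
    }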
58344 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
58518 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
58519 if (Cond == X86::COND_INVALID)
58528 Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Glue);
58531 Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
58704 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
58705 RC.hasSuperClassEq(&X86::GR16RegClass) ||
58706 RC.hasSuperClassEq(&X86::GR32RegClass) ||
58707 RC.hasSuperClassEq(&X86::GR64RegClass) ||
58708 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
58714 return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
58715 RC.hasSuperClassEq(&X86::FR32XRegClass) ||
58716 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
58717 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
58718 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
58719 RC.hasSuperClassEq(&X86::VR512RegClass);
58725 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
58726 RC.hasSuperClassEq(&X86::VK2RegClass) ||
58727 RC.hasSuperClassEq(&X86::VK4RegClass) ||
58728 RC.hasSuperClassEq(&X86::VK8RegClass) ||
58729 RC.hasSuperClassEq(&X86::VK16RegClass) ||
58730 RC.hasSuperClassEq(&X86::VK32RegClass) ||
58731 RC.hasSuperClassEq(&X86::VK64RegClass);
58751 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
58754 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
58762 return std::make_pair(0U, &X86::VK1RegClass);
58764 return std::make_pair(0U, &X86::VK8RegClass);
58766 return std::make_pair(0U, &X86::VK16RegClass);
58770 return std::make_pair(0U, &X86::VK32RegClass);
58772 return std::make_pair(0U, &X86::VK64RegClass);
58779 ? &X86::GR8RegClass
58780 : &X86::GR8_NOREX2RegClass);
58783 ? &X86::GR16RegClass
58784 : &X86::GR16_NOREX2RegClass);
58787 ? &X86::GR32RegClass
58788 : &X86::GR32_NOREX2RegClass);
58791 ? &X86::GR64RegClass
58792 : &X86::GR64_NOREX2RegClass);
58799 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
58801 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
58804 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
58806 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
58812 ? &X86::GR8RegClass
58813 : &X86::GR8_NOREX2RegClass);
58816 ? &X86::GR16RegClass
58817 : &X86::GR16_NOREX2RegClass);
58821 ? &X86::GR32RegClass
58822 : &X86::GR32_NOREX2RegClass);
58825 ? &X86::GR64RegClass
58826 : &X86::GR64_NOREX2RegClass);
58830 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
58832 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
58835 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
58837 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
58843 return std::make_pair(0U, &X86::RFP32RegClass);
58845 return std::make_pair(0U, &X86::RFP64RegClass);
58847 return std::make_pair(0U, &X86::RFP80RegClass);
58851 return std::make_pair(0U, &X86::VR64RegClass);
58862 return std::make_pair(0U, &X86::FR16XRegClass);
58867 return std::make_pair(0U, &X86::FR32XRegClass);
58868 return std::make_pair(0U, &X86::FR32RegClass);
58872 return std::make_pair(0U, &X86::FR64XRegClass);
58873 return std::make_pair(0U, &X86::FR64RegClass);
58877 return std::make_pair(0U, &X86::VR128XRegClass);
58878 return std::make_pair(0U, &X86::VR128RegClass);
58886 return std::make_pair(0U, &X86::VR128XRegClass);
58887 return std::make_pair(0U, &X86::VR128RegClass);
58892 return std::make_pair(0U, &X86::VR128XRegClass);
58893 return std::make_pair(0U, &X86::VR128RegClass);
58902 return std::make_pair(0U, &X86::VR128XRegClass);
58903 return std::make_pair(0U, &X86::VR128RegClass);
58909 return std::make_pair(0U, &X86::VR256XRegClass);
58910 return std::make_pair(0U, &X86::VR256RegClass);
58915 return std::make_pair(0U, &X86::VR256XRegClass);
58916 return std::make_pair(0U, &X86::VR256RegClass);
58924 return std::make_pair(0U, &X86::VR256XRegClass);
58926 return std::make_pair(0U, &X86::VR256RegClass);
58932 return std::make_pair(0U, &X86::VR512RegClass);
58933 return std::make_pair(0U, &X86::VR512_0_15RegClass);
58938 return std::make_pair(0U, &X86::VR512RegClass);
58939 return std::make_pair(0U, &X86::VR512_0_15RegClass);
58948 return std::make_pair(0U, &X86::VR512RegClass);
58949 return std::make_pair(0U, &X86::VR512_0_15RegClass);
58963 return std::make_pair(0U, &X86::VR64RegClass);
58972 return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
58975 return std::make_pair(X86::XMM0, &X86::FR32RegClass);
58978 return std::make_pair(X86::XMM0, &X86::FR64RegClass);
58982 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
58986 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
58994 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
58999 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
59003 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
59011 return std::make_pair(X86::YMM0, &X86::VR256RegClass);
59016 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
59020 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
59028 return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
59036 return std::make_pair(0U, &X86::VK1WMRegClass);
59038 return std::make_pair(0U, &X86::VK8WMRegClass);
59040 return std::make_pair(0U, &X86::VK16WMRegClass);
59044 return std::make_pair(0U, &X86::VK32WMRegClass);
59046 return std::make_pair(0U, &X86::VK64WMRegClass);
59056 return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
59058 return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
59060 return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
59062 return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
59066 return std::make_pair(0U, &X86::GR8RegClass);
59068 return std::make_pair(0U, &X86::GR16RegClass);
59070 return std::make_pair(0U, &X86::GR32RegClass);
59072 return std::make_pair(0U, &X86::GR64RegClass);
59077 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
59078 return std::make_pair(0U, &X86::GR32RegClass);
59099 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
59100 return std::make_pair(X86::FP0 + Constraint[4] - '0',
59101 &X86::RFP80RegClass);
59106 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
59111 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
59117 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
59122 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
59166 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
59167 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
59168 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
59169 : /*Size == 64*/ (is64Bit ? &X86::GR64RegClass : nullptr);
59174 case X86::RAX:
59175 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
59176 case X86::RDX:
59177 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
59178 case X86::RCX:
59179 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
59180 case X86::RBX:
59181 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
59182 case X86::RSI:
59183 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
59184 case X86::RDI:
59185 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
59186 case X86::RBP:
59187 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
59206 Res.second = &X86::FR16XRegClass;
59208 Res.second = &X86::FR32XRegClass;
59210 Res.second = &X86::FR64XRegClass;
59211 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
59212 Res.second = &X86::VR128XRegClass;
59213 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
59214 Res.second = &X86::VR256XRegClass;
59215 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
59216 Res.second = &X86::VR512RegClass;
59224 Res.second = &X86::VK1RegClass;
59226 Res.second = &X86::VK8RegClass;
59228 Res.second = &X86::VK16RegClass;
59230 Res.second = &X86::VK32RegClass;
59232 Res.second = &X86::VK64RegClass;
59278 if (X86::GR64RegClass.contains(*I))
59279 RC = &X86::GR64RegClass;
59319 case X86::CALL64m:
59320 case X86::CALL64m_NT:
59321 case X86::TAILJMPm64:
59322 case X86::TAILJMPm64_REX: {
59325 if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
59345 case X86::CALL64r:
59346 case X86::CALL64r_NT:
59347 case X86::TAILJMPr64:
59348 case X86::TAILJMPr64_REX:
59353 case X86::CALL64pcrel32:
59354 case X86::TAILJMPd64:
59360 TargetReg = X86::R11;
59367 return BuildMI(MBB, MBBI, MIMetadata(*MBBI), TII->get(X86::KCFI_CHECK))