1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
35 "amdgpu-bypass-slow-div",
36 cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
56 // In order for this to be a signed 24-bit value, bit 23 must
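// Illustrative sketch (not from this file): a 32-bit value is a signed 24-bit
// value exactly when bits 31..23 all equal the sign bit, i.e. it lies in
// [-2^23, 2^23).
#include <cstdint>
static bool fitsInSigned24(int32_t V) {
  return V >= -(1 << 23) && V < (1 << 23);
}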
181 // There are no 64-bit extloads. These should be done as a 32-bit extload and
182 // an extension to 64-bit.
411 if (Subtarget->has16BitInsts())
478 // The hardware supports 32-bit FSHR, but not FSHL.
481 // The hardware supports 32-bit ROTR, but not ROTL.
609 // The expansion for 64-bit division is enormous.
633 const auto Flags = Op.getNode()->getFlags();
640 //===----------------------------------------------------------------------===//
642 //===----------------------------------------------------------------------===//
683 unsigned Opc = N->getOpcode();
687 SDValue BCSrc = N->getOperand(0);
699 /// \returns true if the operation will definitely need to use a 64-bit
704 return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
713 return N->getValueType(0) == MVT::f32;
723 switch (N->getOpcode()) {
738 switch (N->getConstantOperandVal(0)) {
758 // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
764 MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
766 assert(!N->use_empty());
768 // XXX - Should this limit the number of uses to check?
769 for (const SDNode *U : N->uses()) {
786 // Round to the next multiple of 32 bits.
807 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
825 // If we are reducing to a 32-bit load or a smaller multi-dword load,
830 EVT OldVT = N->getValueType(0);
834 unsigned AS = MN->getAddressSpace();
835 // Do not shrink an aligned scalar load to sub-dword.
836 // Scalar engine cannot do sub-dword loads.
837 // TODO: Update this for GFX12 which does have scalar sub-dword loads.
838 if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
842 MN->isInvariant())) &&
843 AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
846 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
877 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
878 // profitable with the expansion for 64-bit since it's generally good to
889 switch (N->getOpcode()) {
894 unsigned IntrID = N->getConstantOperandVal(0);
898 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
902 case AMDGPUISD::SETCC: // ballot-style instruction
928 return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags());
939 //===---------------------------------------------------------------------===//
941 //===---------------------------------------------------------------------===//
948 (Subtarget->has16BitInsts() && (VT == MVT::f16 || VT == MVT::bf16));
988 unsigned SrcSize = Source->getScalarSizeInBits();
989 unsigned DestSize = Dest->getScalarSizeInBits();
991 if (DestSize == 16 && Subtarget->has16BitInsts())
998 unsigned SrcSize = Src->getScalarSizeInBits();
999 unsigned DestSize = Dest->getScalarSizeInBits();
1001 if (SrcSize == 16 && Subtarget->has16BitInsts())
1008 // Any register load of a 64-bit value really requires 2 32-bit moves. For all
1009 // practical purposes, the extra mov 0 to load a 64-bit value is free. As used,
1010 // this will enable reducing 64-bit operations to 32-bit, which is always
1020 // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
1021 // limited number of native 64-bit operations. Shrinking an operation to fit
1022 // in a single 32-bit register should always be helpful. As currently used,
1024 // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
1031 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
1032 N->getOpcode() == ISD::SRL) &&
1034 // Always commute pre-type legalization and right shifts.
1037 N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR)
1040 // If the only user is an i32 right-shift, then don't destroy a BFE pattern.
1041 if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 &&
1042 (N->use_begin()->getOpcode() == ISD::SRA ||
1043 N->use_begin()->getOpcode() == ISD::SRL))
1053 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
1054 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
1055 RHSLd->getExtensionType() == ISD::ZEXTLOAD;
1057 SDValue LHS = N->getOperand(0).getOperand(0);
1058 SDValue RHS = N->getOperand(0).getOperand(1);
1062 //===---------------------------------------------------------------------===//
1064 //===---------------------------------------------------------------------===//
1151 LLVMContext &Ctx = Fn.getParent()->getContext();
1175 // to get accurate in-memory offsets. The "PartOffset" is completely useless
1208 // all the floating-point vector types.
1276 //===---------------------------------------------------------------------===//
1278 //===---------------------------------------------------------------------===//
1297 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1305 for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
1307 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1308 if (FI->getIndex() < 0) {
1309 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1311 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1336 FuncName = G->getSymbol();
1338 FuncName = G->getGlobal()->getName();
1342 DAG.getContext()->diagnose(NoCalls);
1363 DAG.getContext()->diagnose(NoDynamicAlloca);
1372 Op->print(errs(), &DAG);
1419 switch (N->getOpcode()) {
1462 const GlobalValue *GV = G->getGlobal();
1464 if (!MFI->isModuleEntryFunction()) {
1471 if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1472 G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1473 if (!MFI->isModuleEntryFunction() &&
1474 GV->getName() != "llvm.amdgcn.module.lds") {
1478 Fn, "local memory global used by non-kernel function",
1480 DAG.getContext()->diagnose(BadLDSDecl);
1494 // XXX: What does the value of G->getOffset() mean?
1495 assert(G->getOffset() == 0 &&
1496 "Do not know what to do with a non-zero offset");
1501 unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1520 for (const SDUse &U : Op->ops()) {
1536 for (const SDUse &U : Op->ops())
1555 // Extract 32-bit registers at a time.
1599 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1684 // select (fcmp olt (lhs, K)), (fneg lhs), -K
1685 // -> fneg (fmin_legacy lhs, K)
1689 APFloat NegRHS = neg(CRHS->getValueAPF());
1690 if (NegRHS == CFalse->getValueAPF()) {
1735 // otherwise be a 1-vector.
1743 HiVT = NumElts - LoNumElts == 1
1745 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1782 SDValue BasePtr = Load->getBasePtr();
1783 EVT MemVT = Load->getMemoryVT();
1785 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1796 Align BaseAlign = Load->getAlign();
1799 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1800 Load->getChain(), BasePtr, SrcValue, LoMemVT,
1801 BaseAlign, Load->getMemOperand()->getFlags());
1804 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1806 HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1831 SDValue BasePtr = Load->getBasePtr();
1832 EVT MemVT = Load->getMemoryVT();
1834 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1835 Align BaseAlign = Load->getAlign();
1838 // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1839 // or 16-byte fully dereferenceable. Otherwise, split the vector load.
1852 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1853 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1864 SDValue Val = Store->getValue();
1872 EVT MemVT = Store->getMemoryVT();
1873 SDValue Chain = Store->getChain();
1874 SDValue BasePtr = Store->getBasePtr();
1887 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1888 Align BaseAlign = Store->getAlign();
1894 Store->getMemOperand()->getFlags());
1897 HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1902 // This is a shortcut for integer division because we have fast i32<->f32
1904 // float is enough to accurately represent up to a 24-bit signed integer.
1924 unsigned DivBits = BitSize - SignBits;
1937 // jq = jq >> (bitsize - 2)
1939 DAG.getConstant(BitSize - 2, DL, VT));
1963 // float fqneg = -fq;
1969 if (Subtarget->isGCN()) {
1972 MFI->getMode().FP32Denormals != DenormalMode::getPreserveSign();
1976 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
2012 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
2065 !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
2066 : MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign()
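// Illustrative sketch (not from this file) of the LowerDIVREM24 shortcut
// described above: operands that fit in 24 bits are exactly representable as
// floats, so a reciprocal-based estimate plus a small integer fix-up recovers
// the exact quotient. The real lowering relies on the hardware RCP rounding,
// so it only ever needs the single upward "jq" correction.
#include <cstdint>
#include <cmath>
static void udivrem24_sketch(uint32_t IA, uint32_t IB, // IB != 0, both < 2^24
                             uint32_t &Quot, uint32_t &Rem) {
  float FA = (float)IA, FB = (float)IB;               // exact conversions
  uint32_t IQ = (uint32_t)truncf(FA * (1.0f / FB));   // approximate quotient
  while (IQ * IB > IA)                                // estimate one too large
    --IQ;
  while ((IQ + 1) * IB <= IA)                         // estimate one too small
    ++IQ;
  Quot = IQ;
  Rem = IA - IB * IQ;
}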
2094 // First round of UNR (Unsigned integer Newton-Raphson).
2206 const unsigned bitPos = halfBitWidth - i - 1;
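// Illustrative sketch (not from this file): the general 64-bit UDIVREM
// expansion referenced by the loop above is a shift-subtract (restoring) long
// division, producing one quotient bit per step from the top bit down.
#include <cstdint>
static uint64_t udivrem64_sketch(uint64_t Num, uint64_t Den, uint64_t &Rem) {
  // Precondition: Den != 0.
  uint64_t Quot = 0, R = 0;
  for (int i = 63; i >= 0; --i) {
    R = (R << 1) | ((Num >> i) & 1);  // bring down the next dividend bit
    if (R >= Den) {                   // quotient bit i is 1
      R -= Den;
      Quot |= UINT64_C(1) << i;
    }
  }
  Rem = R;
  return Quot;
}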
2298 SDValue NegOne = DAG.getConstant(-1, DL, VT);
2349 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2353 auto Flags = Op->getFlags();
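// Illustrative sketch (not from this file) of the frem expansion noted above,
// i.e. x - trunc(x / y) * y with the multiply-add fused into a single fma:
#include <cmath>
static float frem_sketch(float X, float Y) {
  return fmaf(-truncf(X / Y), Y, X);
}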
2385 // TODO: Should this propagate fast-math-flags?
2396 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2424 // Extend back to 64 bits.
2430 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2439 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2461 // TODO: Should this propagate fast-math-flags?
2493 // XXX - May require not supporting f32 denormals?
2505 // TODO: Should this propagate fast-math-flags?
2531 // result += -1.0.
2536 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2546 // TODO: Should this propagate fast-math-flags?
2654 // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0)
2659 SDNodeFlags Flags = Op->getFlags();
2663 assert(!Subtarget->has16BitInsts());
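// Illustrative sketch (not from this file) of the denormal fix-up above:
// tiny inputs are scaled by 2^32 before the hardware log2 and 32 is
// subtracted afterwards, since log2(x * 2^32) - 32 == log2(x).
#include <cmath>
static float log2_full_range_sketch(float X) {
  bool IsDenorm = X < 0x1.0p-126f;                   // below the smallest normal
  float Scaled = X * (IsDenorm ? 0x1.0p+32f : 1.0f);
  return log2f(Scaled) - (IsDenorm ? 32.0f : 0.0f);
}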
2694 SDNodeFlags Flags = Op->getFlags();
2704 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2710 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2725 if (Subtarget->hasFastFMAF32()) {
2727 const float c_log10 = 0x1.344134p-2f;
2728 const float cc_log10 = 0x1.09f79ep-26f;
2731 const float c_log = 0x1.62e42ep-1f;
2732 const float cc_log = 0x1.efa39ep-25f;
2744 const float ch_log10 = 0x1.344000p-2f;
2745 const float ct_log10 = 0x1.3509f6p-18f;
2748 const float ch_log = 0x1.62e000p-1f;
2749 const float ct_log = 0x1.0bfbe8p-15f;
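// Illustrative sketch (not from this file): with fast FMA, log10(x) is formed
// as log2(x) * log10(2), where log10(2) is split into the high part c and the
// tiny correction cc listed above so the product keeps extra precision.
#include <cmath>
static float log10_sketch(float X) {
  const float C  = 0x1.344134p-2f;   // ~log10(2), high part
  const float CC = 0x1.09f79ep-26f;  // low-order correction
  float Y = log2f(X);
  float R = Y * C;
  // R + (Y*CC + (Y*C - R)) reconstitutes Y * (C + CC) in extended precision.
  return R + fmaf(Y, CC, fmaf(Y, C, -R));
}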
2808 DAG.getConstantFP(-32.0 * Log2BaseInverted, SL, VT);
2817 if (Subtarget->hasFastFMAF32())
2839 SDNodeFlags Flags = Op->getFlags();
2843 assert(!Subtarget->has16BitInsts());
2855 // bool needs_scaling = x < -0x1.f80000p+6f;
2856 // v_exp_f32(x + (s ? 0x1.0p+6f : 0.0f)) * (s ? 0x1.0p-64f : 1.0f);
2858 // -nextafter(128.0, -1)
2859 SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT);
2875 SDValue TwoExpNeg64 = DAG.getConstantFP(0x1.0p-64f, SL, VT);
2899 SDValue Threshold = DAG.getConstantFP(-0x1.5d58a0p+6f, SL, VT);
2913 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.969d48p-93f, SL, VT);
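// Illustrative sketch (not from this file) of the range-scaling trick above:
// inputs close to the underflow limit get an offset added before the hardware
// exp instruction, and the result is multiplied by a compensating power of
// two afterwards; e.g. for exp2, exp2(x + 64) * 2^-64 == exp2(x).
#include <cmath>
static float exp2_scaled_sketch(float X) {
  bool NeedsScaling = X < -0x1.f80000p+6f;        // would otherwise flush to 0
  float Offset = NeedsScaling ? 0x1.0p+6f : 0.0f;
  float ResultScale = NeedsScaling ? 0x1.0p-64f : 1.0f;
  return exp2f(X + Offset) * ResultScale;
}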
2921 /// Emit an approx-funcs-appropriate lowering for exp10. inf/nan should still be
2930 // exp2(x * 0x1.a92000p+1f) * exp2(x * 0x1.4f0978p-11f);
2932 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2941 // bool s = x < -0x1.2f7030p+5f;
2944 // exp2(x * 0x1.4f0978p-11f) *
2945 // (s ? 0x1.9f623ep-107f : 1.0f);
2949 SDValue Threshold = DAG.getConstantFP(-0x1.2f7030p+5f, SL, VT);
2958 SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2967 SDValue ResultScaleFactor = DAG.getConstantFP(0x1.9f623ep-107f, SL, VT);
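// Illustrative sketch (not from this file): the approx-funcs exp10 path above
// computes 10^x = 2^(x*log2(10)) as a product of two exp2 calls, with
// log2(10) split into the two constants shown so no extra precision is needed.
#include <cmath>
static float exp10_unsafe_sketch(float X) {
  const float K0 = 0x1.a92000p+1f;   // high part of log2(10)
  const float K1 = 0x1.4f0978p-11f;  // low part of log2(10)
  return exp2f(X * K0) * exp2f(X * K1);
}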
2979 SDNodeFlags Flags = Op->getFlags();
2990 // exp(f16 x) ->
3003 // library behavior. Also, is known-not-daz source sufficient?
3020 // f = x*(64/ln(2)) - n
3021 // r = f*(ln(2)/64) = x - n*(ln(2)/64)
3037 if (Subtarget->hasFastFMAF32()) {
3039 const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits
3041 const float cc_exp10 = 0x1.2f346ep-24f;
3052 const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits
3055 const float cl_exp10 = 0x1.4f0978p-11f;
3085 DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
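// Illustrative sketch (not from this file) of the fast-FMA exp path: exp(x) =
// 2^(x*log2(e)), with log2(e) carried as the ~49-bit pair c + cc (cc is the
// cc_exp constant above); the product splits into an integer exponent and a
// small fractional argument for the hardware exp2.
#include <cmath>
static float exp_sketch(float X) {
  const float C  = 0x1.715476p+0f;   // log2(e), high part
  const float CC = 0x1.4ae0bep-26f;  // low-order correction
  float PH = X * C;
  float PL = fmaf(X, CC, fmaf(X, C, -PH)); // bits the first product lost
  float E  = rintf(PH);                    // integer part of the exponent
  float A  = (PH - E) + PL;                // small reduced argument
  return ldexpf(exp2f(A), (int)E);         // 2^A scaled by 2^E
}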
3130 SDValue NumExtBits = DAG.getConstant(32u - NumBits, SL, MVT::i32);
3156 bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
3159 // (ctlz hi:lo) -> (umin (ffbh src), 32)
3160 // (cttz hi:lo) -> (umin (ffbl src), 32)
3161 // (ctlz_zero_undef src) -> (ffbh src)
3162 // (cttz_zero_undef src) -> (ffbl src)
3164 // The 64-bit scalar version produces a 32-bit result:
3165 // (ctlz hi:lo) -> (umin (S_FLBIT_I32_B64 src), 64)
3166 // (cttz hi:lo) -> (umin (S_FF1_I32_B64 src), 64)
3167 // (ctlz_zero_undef src) -> (S_FLBIT_I32_B64 src)
3168 // (cttz_zero_undef src) -> (S_FF1_I32_B64 src)
3184 // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
3185 // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
3186 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
3187 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
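// Illustrative sketch (not from this file) of the first pattern above: the
// hardware ffbh returns -1 (all ones) on zero input, so the low half's count
// is saturated at +32 and the whole thing is clamped to 64 with umin3.
#include <cstdint>
static uint32_t ffbh(uint32_t X) {            // hardware-style ctlz, -1 on 0
  return X ? (uint32_t)__builtin_clz(X) : 0xFFFFFFFFu;
}
static uint32_t uaddsat32(uint32_t A, uint32_t B) {
  uint32_t S = A + B;
  return S < A ? 0xFFFFFFFFu : S;             // clamp on wrap-around
}
static uint32_t ctlz64_sketch(uint32_t Hi, uint32_t Lo) {
  uint32_t FromHi = ffbh(Hi);
  uint32_t FromLo = uaddsat32(ffbh(Lo), 32);
  uint32_t Min = FromHi < FromLo ? FromHi : FromLo;
  return Min < 64 ? Min : 64;                 // umin3(FromHi, FromLo, 64)
}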
3208 // The regular method converting a 64-bit integer to float roughly consists of
3210 // conversion from a 64-bit integer to a float is essentially the same as the
3211 // one from a 32-bit integer. The only difference is that it has more
3212 // trailing bits to be rounded. To leverage the native 32-bit conversion, a
3213 // 64-bit integer could be preprocessed and fit into a 32-bit integer then
3221 // // reduced to a 32-bit one automatically.
3226 // // convert it as a 32-bit integer and scale the result back.
3227 // return uitofp(hi) * 2^(32 - shamt);
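// Illustrative sketch (not from this file) of the scheme described above:
// normalize on the high word's clz, fold any discarded low bits into a sticky
// bit so rounding stays correct, convert 32 bits natively, then scale back.
#include <cstdint>
#include <cmath>
static float uitofp64_sketch(uint64_t U) {
  uint32_t Hi = (uint32_t)(U >> 32);
  uint32_t ShAmt = Hi ? (uint32_t)__builtin_clz(Hi) : 32; // clz(hi), 32 on zero
  U <<= ShAmt;                                            // normalize
  Hi = (uint32_t)(U >> 32);
  uint32_t Lo = (uint32_t)U;
  Hi |= (Lo != 0);             // sticky bit keeps round-to-nearest-even exact
  return ldexpf((float)Hi, 32 - (int)ShAmt);   // uitofp(hi) * 2^(32 - shamt)
}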
3241 if (Signed && Subtarget->isGCN()) {
3243 // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
3245 // - 32 if Lo and Hi have opposite signs;
3246 // - 33 if Lo and Hi have the same sign.
3251 // - -1 if Lo and Hi have opposite signs; and
3252 // - 0 otherwise.
3256 // umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
3260 // umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
3291 // Normalize the given 64-bit integer.
3299 // Get the 32-bit normalized integer.
3301 // Convert the normalized 32-bit integer into f32.
3303 (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
3307 // 64-bit integer is converted as a 32-bit one.
3311 if (Subtarget->isGCN())
3315 // part directly to emulate the multiplication of 2^ShAmt. That 8-bit
3347 // TODO: Should this propagate fast-math-flags?
3378 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3426 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3455 // The basic idea of converting a floating point number into a pair of 32-bit
3459 // hif := floor(tf * 2^-32);
3460 // lof := tf - hif * 2^32; // lof is always positive due to floor.
3467 // However, a 32-bit floating point number has only 23 bits mantissa and
3481 llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL,
3484 llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL,
3488 llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT);
3490 llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT);
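// Illustrative sketch (not from this file) of the split described above for
// the f64 case: the high word is floor(t * 2^-32) and the low word is the
// exact remainder t - hi * 2^32, recovered with an FMA.
#include <cstdint>
#include <cmath>
static uint64_t fptoui64_sketch(double X) {      // assumes 0 <= X < 2^64
  double T  = std::trunc(X);
  double Hi = std::floor(T * 0x1p-32);           // the 2^-32 constant above
  double Lo = std::fma(Hi, -0x1p+32, T);         // T - Hi * 2^32, always >= 0
  return ((uint64_t)(uint32_t)Hi << 32) | (uint32_t)Lo;
}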
3492 // TODO: Should this propagate fast-math-flags?
3512 // r := xor(r, sign) - sign;
3536 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
3554 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
3578 // B = clamp(1-E, 0, 13);
3666 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3687 //===----------------------------------------------------------------------===//
3689 //===----------------------------------------------------------------------===//
3697 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
3698 // as unsigned 24-bit values.
3706 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
3708 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
3709 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
3710 unsigned NewOpcode = Node24->getOpcode();
3712 unsigned IID = Node24->getConstantOperandVal(0);
3727 llvm_unreachable("Expected 24-bit mul intrinsic");
3739 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
3757 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
3758 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
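// Worked example (illustration, not from this file): extracting the signed
// 5-bit field at offset 8 from Src0 = 0x1F00 first shifts left by 19, giving
// 0xF8000000, then the arithmetic shift right by 27 sign-extends it to -1.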
3766 for (SDNode *U : Val->uses()) {
3768 if (M->isVolatile())
3803 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
3808 EVT VT = LN->getMemoryVT();
3811 Align Alignment = LN->getAlign();
3814 unsigned AS = LN->getAddressSpace();
3820 VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
3840 = DAG.getLoad(NewVT, SL, LN->getChain(),
3841 LN->getBasePtr(), LN->getMemOperand());
3856 if (!SN->isSimple() || !ISD::isNormalStore(SN))
3859 EVT VT = SN->getMemoryVT();
3864 Align Alignment = SN->getAlign();
3867 unsigned AS = SN->getAddressSpace();
3874 VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
3889 SDValue Val = SN->getValue();
3900 return DAG.getStore(SN->getChain(), SL, CastVal,
3901 SN->getBasePtr(), SN->getMemOperand());
3910 SDValue N0 = N->getOperand(0);
3912 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3915 SDValue N1 = N->getOperand(1);
3916 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3922 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3923 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3932 unsigned IID = N->getConstantOperandVal(0);
3945 SDValue Src = N->getOperand(1);
3949 // frexp_exp (fneg x) -> frexp_exp x
3950 // frexp_exp (fabs x) -> frexp_exp x
3951 // frexp_exp (fneg (fabs x)) -> frexp_exp x
3952 SDValue Src = N->getOperand(1);
3956 return SDValue(DCI.DAG.UpdateNodeOperands(N, N->getOperand(0), PeekSign),
3964 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3980 // Re-visit the ands. It's possible we eliminated one of them and it could
3991 EVT VT = N->getValueType(0);
3993 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3997 SDValue LHS = N->getOperand(0);
3998 unsigned RHSVal = RHS->getZExtValue();
4005 switch (LHS->getOpcode()) {
4011 SDValue X = LHS->getOperand(0);
4016 // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
4018 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
4038 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
4040 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
4041 // common case, splitting this into a move and a 32-bit shift is faster and
4046 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
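// Illustrative sketch (not from this file) of the split above: once the
// constant shift amount is at least 32, the low half of the result is zero
// and only a 32-bit shift into the high half is needed.
#include <cstdint>
static uint64_t shl64_by_const_sketch(uint64_t X, unsigned C) { // 32 <= C < 64
  uint32_t Hi = (uint32_t)X << (C - 32); // only the low half of X can survive
  return (uint64_t)Hi << 32;             // build_pair(0, Hi)
}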
4059 if (N->getValueType(0) != MVT::i64)
4062 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
4068 unsigned RHSVal = RHS->getZExtValue();
4070 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
4072 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
4080 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
4082 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
4094 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
4098 EVT VT = N->getValueType(0);
4099 SDValue LHS = N->getOperand(0);
4100 unsigned ShiftAmt = RHS->getZExtValue();
4104 // fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1)
4109 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
4113 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
4114 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
4127 // build_pair (srl hi_32(x), C - 32), 0
4132 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
4144 EVT VT = N->getValueType(0);
4145 SDValue Src = N->getOperand(0);
4147 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
4166 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
4169 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
4186 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
4188 // i16 (trunc (srl i64:x, K)), K <= 16 ->
4199 // - For left shifts, do the transform as long as the shift
4201 // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid
4204 (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits());
4253 if (V->getOpcode() != ISD::ADD)
4256 return isOneConstant(V->getOperand(1)) ? V->getOperand(0) : SDValue();
4261 assert(N->getOpcode() == ISD::MUL);
4262 EVT VT = N->getValueType(0);
4264 // Don't generate 24-bit multiplies on values that are in SGPRs, since
4265 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4268 if (!N->isDivergent())
4278 SDValue N0 = N->getOperand(0);
4279 SDValue N1 = N->getOperand(1);
4281 // Undo the InstCombine canonicalization X * (Y + 1) -> X * Y + X to enable mad
4284 // mul x, (add y, 1) -> add (mul x, y), x
4285 auto IsFoldableAdd = [](SDValue V) -> SDValue {
4290 if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool {
4291 return U->getOpcode() == ISD::MUL;
4301 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper);
4306 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper);
4311 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
4326 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
4330 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4339 // for signed multiply of 8 and 16-bit types.
4346 if (N->getValueType(0) != MVT::i32)
4352 bool Signed = N->getOpcode() == ISD::SMUL_LOHI;
4353 SDValue N0 = N->getOperand(0);
4354 SDValue N1 = N->getOperand(1);
4365 // Try to use two fast 24-bit multiplies (one for each half of the result)
4370 if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4377 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
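// Illustrative sketch (not from this file): when both operands are known to
// fit in 24 bits, the 48-bit product is covered by the fast pair of
// instructions -- mul_u24 supplies the low 32 bits and mul_hi_u24 the rest.
#include <cstdint>
static void mul_lohi_u24_sketch(uint32_t A, uint32_t B,
                                uint32_t &Lo, uint32_t &Hi) {
  uint64_t P = (uint64_t)(A & 0xFFFFFF) * (B & 0xFFFFFF); // at most 48 bits
  Lo = (uint32_t)P;          // what MUL_U24 produces
  Hi = (uint32_t)(P >> 32);  // what MULHI_U24 produces
}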
4395 EVT VT = N->getValueType(0);
4397 if (!Subtarget->hasMulI24() || VT.isVector())
4400 // Don't generate 24-bit multiplies on values that are in SGPRs, since
4401 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4406 if (Subtarget->hasSMulHi() && !N->isDivergent())
4412 SDValue N0 = N->getOperand(0);
4413 SDValue N1 = N->getOperand(1);
4428 EVT VT = N->getValueType(0);
4430 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
4433 // Don't generate 24-bit multiplies on values that are in SGPRs, since
4434 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4439 if (Subtarget->hasSMulHi() && !N->isDivergent())
4445 SDValue N0 = N->getOperand(0);
4446 SDValue N1 = N->getOperand(1);
4465 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
4479 // The native instructions return -1 on 0 input. Optimize out a select that
4480 // produces -1 on 0.
4493 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
4496 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
4497 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
4506 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
4507 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
4537 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
4538 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
4540 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
4541 // select c, (fabs x), +k -> fabs (select c, x, k)
4572 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
4588 if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative())
4625 SDValue Cond = N->getOperand(0);
4629 EVT VT = N->getValueType(0);
4634 SDValue True = N->getOperand(1);
4635 SDValue False = N->getOperand(2);
4643 // select (setcc x, y), k, x -> select (setccinv x, y), x, k
4647 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
4653 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
4680 if (C->isZero())
4681 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4683 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
4684 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4748 SDValue N0 = N->getOperand(0);
4749 EVT VT = N->getValueType(0);
4762 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
4776 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
4785 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
4786 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
4797 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
4810 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
4842 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
4843 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
4844 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
4845 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
4859 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
4869 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
4871 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
4879 for (SDNode *U : Neg->uses())
4888 case ISD::FNEARBYINT: // XXX - Should fround be handled?
4898 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
4899 // (fneg (rcp (fneg x))) -> (rcp x)
4906 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
4907 // (fneg (rcp x)) -> (rcp (fneg x))
4909 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
4915 // (fneg (fp_round (fneg x))) -> (fp_round x)
4923 // (fneg (fp_round x)) -> (fp_round (fneg x))
4928 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
4936 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
4939 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
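// Illustrative sketch (not from this file): with no legal f16 type, fneg of a
// half value is just a sign-bit flip on its 16-bit integer container.
#include <cstdint>
static uint16_t fneg_f16_bits_sketch(uint16_t HalfBits) {
  return static_cast<uint16_t>(HalfBits ^ 0x8000); // IEEE half: bit 15 is the sign
}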
4942 // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
4950 SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1);
4960 // fneg (f64 (bitcast (build_vector x, y))) ->
4969 SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end());
4983 // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) ->
5010 SDValue N0 = N->getOperand(0);
5017 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
5022 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
5025 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
5034 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
5038 // XXX - Should this flush denormals?
5039 const APFloat &Val = CFP->getValueAPF();
5041 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
5049 switch(N->getOpcode()) {
5053 EVT DestVT = N->getValueType(0);
5061 SDValue Src = N->getOperand(0);
5088 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
5090 SDValue Src = N->getOperand(0);
5093 uint64_t CVal = C->getZExtValue();
5101 const APInt &Val = C->getValueAPF().bitcastToAPInt();
5159 assert(!N->getValueType(0).isVector() &&
5161 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
5165 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
5169 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
5173 SDValue BitsFrom = N->getOperand(0);
5174 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
5176 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
5180 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
5204 CVal->getSExtValue(),
5211 CVal->getZExtValue(),
5218 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
5254 SDValue N0 = N->getOperand(0);
5255 SDValue N1 = N->getOperand(1);
5256 SDValue N2 = N->getOperand(2);
5257 EVT VT = N->getValueType(0);
5268 return V.isNegative() ? -Zero : Zero;
5273 APFloat V0 = FTZ(N0CFP->getValueAPF());
5274 APFloat V1 = FTZ(N1CFP->getValueAPF());
5275 APFloat V2 = FTZ(N2CFP->getValueAPF());
5287 //===----------------------------------------------------------------------===//
5289 //===----------------------------------------------------------------------===//
5355 DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
5385 unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
5386 const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
5405 return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param);
5652 uint32_t Width = CWidth->getZExtValue() & 0x1f;
5655 Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
5663 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
5687 unsigned SignBits = 32 - MaxValBits + 1;
5716 unsigned Sel = CMask->getZExtValue();
5747 Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
5782 unsigned MaxValue = Subtarget->getMaxWorkitemID(
5803 unsigned SignBits = 32 - Width->getZExtValue() + 1;
5807 // TODO: Could probably figure something out with non-0 offsets.
5814 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
5864 switch (MI->getOpcode()) {
5875 auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
6013 switch (RMW->getOperation()) {
6014 case AtomicRMWInst::Nand:
6021 const DataLayout &DL = RMW->getFunction()->getDataLayout();
6022 unsigned ValSize = DL.getTypeSizeInBits(RMW->getType());
6028 if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
6029 unsigned Size = IntTy->getBitWidth();
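// Illustrative sketch (not from this file): atomicrmw nand has no native
// instruction, so requesting CmpXChg expansion turns it into a
// compare-exchange loop roughly equivalent to this:
#include <atomic>
#include <cstdint>
static uint32_t atomic_nand_sketch(std::atomic<uint32_t> &P, uint32_t V) {
  uint32_t Old = P.load();
  while (!P.compare_exchange_weak(Old, ~(Old & V)))
    ;  // Old is refreshed with the current value on failure
  return Old;  // the value observed just before the update
}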
6046 for (auto &Op : I->operands()) {
6048 if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))