Lines Matching defs:SrcTy

238 LLT SrcTy = MRI.getType(SrcReg);
239 if (SrcTy == GCDTy) {
252 LLT SrcTy = MRI.getType(SrcReg);
253 LLT GCDTy = getGCDType(getGCDType(SrcTy, NarrowTy), DstTy);
1596 LLT SrcTy = MRI.getType(SrcReg);
1597 if (SrcTy.isVector())
1602 unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize;
1603 if (SrcTy.isVector() && LeftoverBits != 0)
1606 if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
1719 LLT SrcTy = MRI.getType(LHS);
1725 if (!extractParts(LHS, SrcTy, NarrowTy, LeftoverTy, LHSPartRegs,
1731 if (!extractParts(MI.getOperand(3).getReg(), SrcTy, NarrowTy, Unused,
2084 LLT SrcTy = MRI.getType(Src1Reg);
2086 const int SrcSize = SrcTy.getSizeInBits();
2203 LLT SrcTy = MRI.getType(SrcReg);
2204 if (SrcTy.isVector())
2212 if (WideTy.getSizeInBits() >= SrcTy.getSizeInBits()) {
2213 if (SrcTy.isPointer()) {
2215 if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) {
2221 SrcTy = LLT::scalar(SrcTy.getSizeInBits());
2222 SrcReg = MIRBuilder.buildPtrToInt(SrcTy, SrcReg).getReg(0);
2225 // Widen SrcTy to WideTy. This does not affect the result, but since the
2226 // user requested this size, it is probably better handled than SrcTy and
2228 if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
2229 SrcTy = WideTy;
2239 auto ShiftAmt = MIRBuilder.buildConstant(SrcTy, DstSize * I);
2240 auto Shr = MIRBuilder.buildLShr(SrcTy, SrcReg, ShiftAmt);
2249 LLT LCMTy = getLCMType(SrcTy, WideTy);
2252 if (LCMTy.getSizeInBits() != SrcTy.getSizeInBits()) {
2254 if (SrcTy.isPointer()) {
2327 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
2331 if (SrcTy.isVector() || DstTy.isVector())
2335 if (SrcTy.isPointer()) {
2339 if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace()))
2342 LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits());
2344 SrcTy = SrcAsIntTy;
2359 LLT ShiftTy = SrcTy;
2360 if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) {
2372 if (SrcTy.isScalar()) {
2379 if (!SrcTy.isVector())
2382 if (DstTy != SrcTy.getElementType())
2385 if (Offset % SrcTy.getScalarSizeInBits() != 0)
2391 MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) *
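
Note: in the extract handling above, a scalar extract at a bit offset reduces to a right shift in ShiftTy (2359-2372) followed by a truncation to the destination width; the vector case instead rescales the extract offset (2391). A scalar sketch under assumed widths (16 bits out of a 64-bit source; names are illustrative):

    #include <cstdint>

    uint16_t extractBits(uint64_t Src, unsigned Offset) {
      return static_cast<uint16_t>(Src >> Offset); // LShr in ShiftTy, then Trunc
    }
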
2549 LLT SrcTy = MRI.getType(LHS);
2551 unsigned SrcBitWidth = SrcTy.getScalarSizeInBits();
3107 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
3114 TLI.isSExtCheaperThanZExt(getApproximateEVTForLLT(SrcTy, Ctx),
3468 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
3469 if (SrcTy.isVector()) {
3470 LLT SrcEltTy = SrcTy.getElementType();
3475 int NumSrcElt = SrcTy.getNumElements();
3777 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
3778 LLT SrcScalTy = LLT::scalar(SrcTy.getSizeInBits());
3817 LLT SrcTy = MRI.getType(ShuffleMI->getReg(1));
3825 LLT NewSrcTy = SrcTy.changeElementType(CastTy.getScalarType());
3864 LLT SrcTy = MRI.getType(Src);
3866 ElementCount SrcTyEC = SrcTy.getElementCount();
3887 SrcTy = LLT::vector(SrcTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
3888 auto CastVec = MIRBuilder.buildBitcast(SrcTy, Src);
4113 LLT SrcTy = MRI.getType(SrcReg);
4121 if (StoreWidth != StoreSizeInBits && !SrcTy.isVector()) {
4127 if (StoreSizeInBits > SrcTy.getSizeInBits()) {
4130 SrcTy = WideTy;
4133 auto ZextInReg = MIRBuilder.buildZExtInReg(SrcTy, SrcReg, StoreWidth);
4143 if (MemTy != SrcTy)
4148 return reduceLoadStoreWidth(StoreMI, 0, SrcTy.getElementType());
4171 if (SrcTy.isPointer()) {
4172 const LLT IntPtrTy = LLT::scalar(SrcTy.getSizeInBits());
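
Note: for a non-byte-sized store (4121-4133), the value is first zero-extended in-register to the memory width, i.e. masked to its low StoreWidth bits, before being stored as the wider type; a pointer source is first converted through an integer of equal size (4171-4172). A scalar model of the masking step done by buildZExtInReg, with an assumed s32 container and 0 < StoreWidth < 32:

    #include <cstdint>

    uint32_t zextInReg32(uint32_t Src, unsigned StoreWidth) {
      uint32_t Mask = (uint32_t{1} << StoreWidth) - 1; // keep low StoreWidth bits
      return Src & Mask;
    }
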
4203 LLT SrcTy = MRI.getType(SrcReg);
4209 assert(SrcTy.isVector() && "Expect a vector store type");
4221 SrcTy.getElementType(), SrcReg, MIRBuilder.buildConstant(IdxTy, I));
4428 LLT SrcTy = MRI.getType(SrcReg);
4429 auto Round = MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND, {SrcTy},
4447 LLT SrcTy = MRI.getType(SrcReg);
4449 MIRBuilder.buildInstr(TargetOpcode::G_FRINT, {SrcTy}, {SrcReg});
4710 LLT SrcTy = Val.getLLTTy(MRI);
4712 std::max(getStackTemporaryAlignment(SrcTy),
4716 createStackTemporary(SrcTy.getSizeInBytes(), StackTypeAlign, PtrInfo);
4990 LLT SrcTy = MRI.getType(SrcReg);
4999 assert(SrcTy.isVector() && NarrowTy.isVector() && "Expected vector types");
5000 assert((SrcTy.getScalarType() == NarrowTy.getScalarType()) && "bad type");
5002 if ((SrcTy.getSizeInBits() % NarrowTy.getSizeInBits() != 0) ||
5006 // This is most likely DstTy (smaller than register size) packed in SrcTy
5008 // lowered to bit sequence extracts from register. Unpack SrcTy to NarrowTy
5011 // %1:_(DstTy), %2, %3, %4 = G_UNMERGE_VALUES %0:_(SrcTy)
5013 // %5:_(NarrowTy), %6 = G_UNMERGE_VALUES %0:_(SrcTy) - reg sequence
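
Note: the comment block at 5006-5013 describes splitting an unmerge in two steps when small DstTy pieces are packed inside SrcTy: first unmerge SrcTy into NarrowTy register chunks, then unmerge each chunk into DstTy values. A scalar sketch, assuming SrcTy = s64, NarrowTy = s32, DstTy = s16 and the little-endian piece order of G_UNMERGE_VALUES (types are assumptions, not taken from the listing):

    #include <cstdint>

    void twoStepUnmerge(uint64_t Src, uint16_t Out[4]) {
      uint32_t Narrow[2] = {static_cast<uint32_t>(Src),        // %5: low chunk
                            static_cast<uint32_t>(Src >> 32)}; // %6: high chunk
      for (unsigned I = 0; I < 2; ++I) {
        Out[2 * I] = static_cast<uint16_t>(Narrow[I]);
        Out[2 * I + 1] = static_cast<uint16_t>(Narrow[I] >> 16);
      }
    }
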
5035 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
5041 if (NarrowTy == SrcTy)
5049 assert(SrcTy.isVector() && "Expected vector types");
5050 assert((SrcTy.getScalarType() == NarrowTy.getScalarType()) && "bad type");
5052 (NarrowTy.getNumElements() >= SrcTy.getNumElements()))
5054 // %2:_(DstTy) = G_CONCAT_VECTORS %0:_(SrcTy), %1:_(SrcTy)
5056 // %3:_(EltTy), %4, %5 = G_UNMERGE_VALUES %0:_(SrcTy)
5057 // %6:_(EltTy), %7, %8 = G_UNMERGE_VALUES %1:_(SrcTy)
5087 if ((NarrowTy.getSizeInBits() % SrcTy.getSizeInBits() != 0) ||
5091 // This is most likely SrcTy (smaller than register size) packed in DstTy
5093 // lowered to bit sequence packing into register. Merge SrcTy to NarrowTy
5096 // %0:_(DstTy) = G_MERGE_VALUES %1:_(SrcTy), %2, %3, %4
5098 // %5:_(NarrowTy) = G_MERGE_VALUES %1:_(SrcTy), %2 - sequence of bits in reg
5099 // %6:_(NarrowTy) = G_MERGE_VALUES %3:_(SrcTy), %4
5103 unsigned NumSrcElts = SrcTy.isVector() ? SrcTy.getNumElements() : 1;
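
Note: the comment block at 5091-5099 is the inverse case: small SrcTy pieces are first merged into NarrowTy register chunks (%5, %6), which then form DstTy. A scalar sketch with the same assumed types (s16 pieces, s32 chunks, s64 result, first operand in the low bits):

    #include <cstdint>

    uint64_t twoStepMerge(const uint16_t In[4]) {
      uint32_t Lo = In[0] | (static_cast<uint32_t>(In[1]) << 16); // %5
      uint32_t Hi = In[2] | (static_cast<uint32_t>(In[3]) << 16); // %6
      return Lo | (static_cast<uint64_t>(Hi) << 32);              // %0
    }
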
5471 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
5474 NarrowTy.getSizeInBits() / SrcTy.getScalarSizeInBits();
5475 LLT SrcNarrowTy = LLT::fixed_vector(NewElemCount, SrcTy.getElementType());
5640 auto [DstReg, DstTy, SrcReg, SrcTy] = RdxMI.getFirst2RegLLTs();
5643 (SrcTy.getNumElements() % NarrowTy.getNumElements() != 0))
5650 NarrowTy.isVector() ? SrcTy.getNumElements() / NarrowTy.getNumElements()
5651 : SrcTy.getNumElements();
5697 if (isPowerOf2_32(SrcTy.getNumElements()) &&
5699 return tryNarrowPow2Reduction(MI, SrcReg, SrcTy, NarrowTy, ScalarOpc);
5721 auto [DstReg, DstTy, ScalarReg, ScalarTy, SrcReg, SrcTy] =
5735 unsigned NumParts = SrcTy.getNumElements();
5749 LLT SrcTy, LLT NarrowTy,
5754 SrcTy.getNumElements() / NarrowTy.getNumElements(), SplitSrcs,
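
Note: 5697-5699 narrows a power-of-two vector reduction by repeatedly splitting the source and combining the halves with the scalar opcode until a single NarrowTy value remains. A scalar model of that tree shape (assumed: 8 lanes, ScalarOpc modeled as '+'):

    #include <cstdint>

    uint32_t reducePow2Add(uint32_t Lanes[8]) {
      for (unsigned N = 8; N > 1; N /= 2)
        for (unsigned I = 0; I < N / 2; ++I)
          Lanes[I] += Lanes[I + N / 2]; // combine the two halves pairwise
      return Lanes[0];
    }
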
6231 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
6234 unsigned coefficient = SrcTy.getNumElements() * MoreTy.getNumElements();
6240 LLT NewTy = SrcTy.changeElementCount(
6286 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
6289 unsigned SrcNumElts = SrcTy.getNumElements();
6301 moreElementsVectorDst(MI, SrcTy, 0);
6317 auto Undef = MIRBuilder.buildUndef(SrcTy);
6594 LLT SrcTy = MRI.getType(Src);
6599 if (SrcTy.getScalarType() != LLT::scalar(16) ||
6875 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
6878 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
6908 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
6911 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
6941 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
6944 if (SrcTy.isScalar() && SrcTy.getSizeInBits() == 2 * NarrowSize) {
7002 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
7003 unsigned Len = SrcTy.getSizeInBits();
7005 if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
7008 auto ZeroSrc = MIRBuilder.buildConstant(SrcTy, 0);
7010 CmpInst::ICMP_EQ, SrcTy.changeElementSize(1), SrcReg, ZeroSrc);
7030 auto MIBShiftAmt = MIRBuilder.buildConstant(SrcTy, 1ULL << i);
7032 SrcTy, Op, MIRBuilder.buildLShr(SrcTy, Op, MIBShiftAmt));
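
Note: the CTLZ fallback at 7030-7032 smears the most significant set bit into every lower position with a logarithmic series of shift-or steps, then counts the remaining zero bits via CTPOP of the complement. Scalar model for 32 bits:

    #include <bit>
    #include <cstdint>

    unsigned ctlz32(uint32_t X) {
      for (unsigned Shift = 1; Shift < 32; Shift <<= 1)
        X |= X >> Shift;        // the buildOr(buildLShr(...)) loop above
      return std::popcount(~X); // zero bits above the original MSB
    }
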
7049 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
7051 unsigned Len = SrcTy.getSizeInBits();
7052 if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {DstTy, SrcTy}})) {
7056 auto Zero = MIRBuilder.buildConstant(SrcTy, 0);
7068 auto MIBCstNeg1 = MIRBuilder.buildConstant(SrcTy, -1);
7069 auto MIBNot = MIRBuilder.buildXor(SrcTy, SrcReg, MIBCstNeg1);
7071 SrcTy, MIBNot, MIRBuilder.buildAdd(SrcTy, SrcReg, MIBCstNeg1));
7072 if (!isSupported({TargetOpcode::G_CTPOP, {SrcTy, SrcTy}}) &&
7073 isSupported({TargetOpcode::G_CTLZ, {SrcTy, SrcTy}})) {
7074 auto MIBCstLen = MIRBuilder.buildConstant(SrcTy, Len);
7076 MIRBuilder.buildCTLZ(SrcTy, MIBTmp));
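
Note: the CTTZ fallback at 7068-7076 forms ~x & (x - 1), which sets exactly the trailing-zero positions of x (and is all-ones for x == 0, yielding Len), then takes CTPOP of it, or Len - CTLZ when only CTLZ is legal. Scalar model:

    #include <bit>
    #include <cstdint>

    unsigned cttz32(uint32_t X) {
      uint32_t Mask = ~X & (X - 1); // the buildXor / buildAdd / buildAnd above
      return std::popcount(Mask);   // or 32 - ctlz32(Mask) when CTPOP is absent
    }
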
7290 LLT SrcTy = MRI.getType(Src);
7294 uint32_t SrcTyScalarSize = SrcTy.getScalarSizeInBits();
7303 LLT MidTy = SrcTy.changeElementSize(SrcTyScalarSize * 2);
7345 LLT SrcTy = MRI.getType(SrcReg);
7349 isPowerOf2_32(SrcTy.getNumElements()) &&
7350 isPowerOf2_32(SrcTy.getScalarSizeInBits())) {
7352 LLT SplitSrcTy = SrcTy.changeElementCount(
7353 SrcTy.getElementCount().divideCoefficientBy(2));
7361 if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
7374 if (DstTy.getScalarSizeInBits() * 2 < SrcTy.getScalarSizeInBits())
7388 auto [Dst, DstTy, Src, SrcTy, Amt, AmtTy] = MI.getFirst3RegLLTs();
7399 auto [Dst, DstTy, Src, SrcTy, Amt, AmtTy] = MI.getFirst3RegLLTs();
7408 if (LI.isLegalOrCustom({RevRot, {DstTy, SrcTy}}) &&
7596 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7598 if (SrcTy == LLT::scalar(1)) {
7606 if (SrcTy != LLT::scalar(64))
7623 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7629 if (SrcTy == S1) {
7637 if (SrcTy != S64)
7666 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7670 if (SrcTy != S64 && SrcTy != S32)
7680 APFloat TwoPExpFP(SrcTy.getSizeInBits() == 32 ? APFloat::IEEEsingle()
7682 APInt::getZero(SrcTy.getSizeInBits()));
7687 MachineInstrBuilder Threshold = MIRBuilder.buildFConstant(SrcTy, TwoPExpFP);
7690 MachineInstrBuilder FSub = MIRBuilder.buildFSub(SrcTy, Src, Threshold);
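
Note: 7666-7690 lowers fp-to-unsigned through the signed conversion: values below the 2^(N-1) threshold (7680-7687) convert directly, larger ones are rebased by subtracting the threshold (7690) before converting, with the top bit restored afterwards. A hedged scalar sketch for an assumed f64 -> u32 case; NaN and out-of-range inputs are poison for the real opcode and not handled here:

    #include <cstdint>

    uint32_t fptoui32(double Val) {
      const double Threshold = 2147483648.0; // 2^31, the buildFConstant above
      if (!(Val >= Threshold))               // FCMP_ULT (also true for NaN)
        return static_cast<uint32_t>(static_cast<int32_t>(Val));
      int32_t Low = static_cast<int32_t>(Val - Threshold); // FSub, then FPTOSI
      return static_cast<uint32_t>(Low) ^ 0x80000000u;     // re-set the top bit
    }
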
7706 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7711 if (SrcTy.getScalarType() != S32 || DstTy.getScalarType() != S64)
7718 unsigned SrcEltBits = SrcTy.getScalarSizeInBits();
7720 auto ExponentMask = MIRBuilder.buildConstant(SrcTy, 0x7F800000);
7721 auto ExponentLoBit = MIRBuilder.buildConstant(SrcTy, 23);
7723 auto AndExpMask = MIRBuilder.buildAnd(SrcTy, Src, ExponentMask);
7724 auto ExponentBits = MIRBuilder.buildLShr(SrcTy, AndExpMask, ExponentLoBit);
7726 auto SignMask = MIRBuilder.buildConstant(SrcTy,
7728 auto AndSignMask = MIRBuilder.buildAnd(SrcTy, Src, SignMask);
7729 auto SignLowBit = MIRBuilder.buildConstant(SrcTy, SrcEltBits - 1);
7730 auto Sign = MIRBuilder.buildAShr(SrcTy, AndSignMask, SignLowBit);
7733 auto MantissaMask = MIRBuilder.buildConstant(SrcTy, 0x007FFFFF);
7734 auto AndMantissaMask = MIRBuilder.buildAnd(SrcTy, Src, MantissaMask);
7735 auto K = MIRBuilder.buildConstant(SrcTy, 0x00800000);
7737 auto R = MIRBuilder.buildOr(SrcTy, AndMantissaMask, K);
7740 auto Bias = MIRBuilder.buildConstant(SrcTy, 127);
7741 auto Exponent = MIRBuilder.buildSub(SrcTy, ExponentBits, Bias);
7742 auto SubExponent = MIRBuilder.buildSub(SrcTy, Exponent, ExponentLoBit);
7743 auto ExponentSub = MIRBuilder.buildSub(SrcTy, ExponentLoBit, Exponent);
7757 auto ZeroSrcTy = MIRBuilder.buildConstant(SrcTy, 0);
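
Note: 7706-7757 converts f32 to a wider signed integer by hand: mask out the exponent (0x7F800000, shift 23), the sign (arithmetic shift from the top bit), and the mantissa (0x007FFFFF plus the implicit 0x00800000 bit), unbias by 127, shift the significand into place, and negate via xor/sub with the sign word; the zero constant at 7757 appears to handle inputs with magnitude below one. Scalar sketch (f32 -> i64; in-range input assumed, since out-of-range G_FPTOSI is poison):

    #include <cstdint>
    #include <cstring>

    int64_t fptosi_f32_to_i64(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));
      int64_t Exponent = ((Bits & 0x7F800000) >> 23) - 127; // unbias (Bias = 127)
      if (Exponent < 0)                  // |F| < 1 truncates to 0 (see 7757)
        return 0;
      int64_t Sign = static_cast<int32_t>(Bits) >> 31;      // 0 or -1 (the AShr)
      int64_t R = (Bits & 0x007FFFFF) | 0x00800000;         // full significand
      R = Exponent >= 23 ? R << (Exponent - 23) : R >> (23 - Exponent);
      return (R ^ Sign) - Sign;          // conditional negation by the sign word
    }
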
7771 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
7787 const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
7803 auto MaxC = MIRBuilder.buildFConstant(SrcTy, MinFloat);
7805 SrcTy.changeElementSize(1), Src, MaxC);
7806 auto Max = MIRBuilder.buildSelect(SrcTy, MaxP, Src, MaxC);
7808 auto MinC = MIRBuilder.buildFConstant(SrcTy, MaxFloat);
7810 MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Max,
7813 MIRBuilder.buildSelect(SrcTy, MinP, Max, MinC, MachineInstr::FmNoNans);
7841 MIRBuilder.buildFCmp(CmpInst::FCMP_ULT, SrcTy.changeElementSize(1), Src,
7842 MIRBuilder.buildFConstant(SrcTy, MinFloat));
7847 MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, SrcTy.changeElementSize(1), Src,
7848 MIRBuilder.buildFConstant(SrcTy, MaxFloat));
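
Note: the saturating path at 7787-7813 clamps the source into the representable window (MinFloat/MaxFloat derived from the fltSemantics at 7787) with fcmp+select pairs before converting, and the compares at 7841-7848 pin out-of-range and NaN inputs afterwards. A scalar model for an assumed f64 -> i32 saturating conversion, with NaN mapping to 0 as fptosi.sat requires:

    #include <cstdint>

    int32_t fptosi_sat_f64_to_i32(double Val) {
      const double MinFloat = -2147483648.0; // smallest i32, exact in f64
      const double MaxFloat = 2147483647.0;  // largest i32, exact in f64
      if (Val != Val)                        // unordered compare: NaN -> 0
        return 0;
      if (Val < MinFloat)
        return INT32_MIN;
      if (Val > MaxFloat)
        return INT32_MAX;
      return static_cast<int32_t>(Val);
    }
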
7987 auto [DstTy, SrcTy] = MI.getFirst2LLTs();
7991 if (DstTy.getScalarType() == S16 && SrcTy.getScalarType() == S64)
8041 LLT SrcTy = MRI.getType(Cmp->getReg(1));
8059 if (TLI.shouldExpandCmpUsingSelects(getApproximateEVTForLLT(SrcTy, Ctx)) ||
8571 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
8575 if (SrcTy.isVector()) {
8576 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
8580 (Offset + DstSize <= SrcTy.getSizeInBits())) {
8582 auto Unmerge = MIRBuilder.buildUnmerge(SrcTy.getElementType(), SrcReg);
8601 (SrcTy.isScalar() ||
8602 (SrcTy.isVector() && DstTy == SrcTy.getElementType()))) {
8603 LLT SrcIntTy = SrcTy;
8604 if (!SrcTy.isScalar()) {
8605 SrcIntTy = LLT::scalar(SrcTy.getSizeInBits());
9073 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
9090 unsigned BitSize = SrcTy.getScalarSizeInBits();
9091 const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());
9094 if (SrcTy.isVector())
9095 IntTy = LLT::vector(SrcTy.getElementCount(), IntTy);
9383 LLT SrcTy = MRI.getType(SrcReg);
9387 if (SrcTy.isScalar()) {
9388 if (DstTy.getSizeInBits() > SrcTy.getSizeInBits())
9793 LLT SrcTy = MRI.getType(Src);
9794 Offset = MIB.buildConstant(LLT::scalar(SrcTy.getSizeInBits()), CurrOffset)
9796 LoadPtr = MIB.buildPtrAdd(SrcTy, Src, Offset).getReg(0);
9891 LLT SrcTy = MRI.getType(Src);
9893 MIB.buildConstant(LLT::scalar(SrcTy.getSizeInBits()), CurrOffset);
9894 LoadPtr = MIB.buildPtrAdd(SrcTy, Src, Offset).getReg(0);