Lines Matching defs:NumElts (definitions of NumElts in LLVM's X86 SelectionDAG lowering; an illustrative sketch of the recurring idiom follows the listing)
3062 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
3064 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
3074 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
3076 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
3560 unsigned NumElts = Mask.size();
3561 return isUndefInRange(Mask, 0, NumElts / 2);
3566 unsigned NumElts = Mask.size();
3567 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
3615 unsigned NumElts = Mask.size();
3617 if (!isUndefOrEqual(M, I) && !isUndefOrEqual(M, I + NumElts))
3655 unsigned NumElts = Mask.size();
3656 APInt DemandedElts = APInt::getZero(NumElts);
3658 if (isInRange(M, 0, NumElts))
3793 unsigned NumElts = VT.getVectorNumElements();
3796 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3801 for (unsigned i = 0; i < NumElts; ++i) {
3824 unsigned NumElts = VT.getVectorNumElements();
3827 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3904 unsigned NumElts = LHS.getValueType().getVectorNumElements();
3906 RHS.getConstantOperandAPInt(1) == NumElts) ||
3908 LHS.getConstantOperandAPInt(1) == NumElts))
4033 unsigned NumElts = VT.getVectorNumElements();
4034 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
4508 unsigned NumElts = VT.getSizeInBits() / 32;
4509 SDValue Vec = DAG.getAllOnesConstant(dl, MVT::getVectorVT(MVT::i32, NumElts));
4551 int NumElts = VT.getVectorNumElements();
4553 for (int i = 0; i < NumElts; ++i) {
4556 Pos += (Unary ? 0 : NumElts * (i % 2));
4569 int NumElts = VT.getVectorNumElements();
4570 for (int i = 0; i < NumElts; ++i) {
4572 Pos += (Lo ? 0 : NumElts / 2);
4583 for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
4587 SDValue V = (M < NumElts) ? V1 : V2;
4590 Ops[I] = V.getOperand(M % NumElts);
4634 int NumElts = VT.getVectorNumElements();
4635 for (int I = 0; I != NumElts; I += 4) {
4638 PackMask.push_back(I + Offset + NumElts);
4639 PackMask.push_back(I + Offset + NumElts + 2);
4744 unsigned NumElts = SizeInBits / EltSizeInBits;
4759 if (NumSrcElts == NumElts) {
4777 UndefElts = APInt(NumElts, 0);
4778 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
4780 for (unsigned i = 0; i != NumElts; ++i) {
4842 APInt UndefSrcElts = APInt::getAllOnes(NumElts);
4843 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
5028 if (isAnyInRange(Mask, 0, NumElts) &&
5033 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
5039 UndefElts = APInt::getZero(NumElts);
5040 for (int i = 0; i != (int)NumElts; ++i) {
5045 } else if (M < (int)NumElts) {
5050 if (UndefElts1[M - NumElts])
5052 EltBits.push_back(EltBits1[M - NumElts]);
5167 unsigned NumElts = VT.getVectorNumElements();
5170 unsigned Offset = Unary ? 0 : NumElts;
5189 int NumElts = DemandedElts.getBitWidth();
5190 int NumInnerElts = NumElts / 2;
5191 int NumEltsPerLane = NumElts / NumLanes;
5737 unsigned NumElts = Mask.size();
5738 assert(KnownUndef.getBitWidth() == NumElts &&
5739 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
5741 for (unsigned i = 0; i != NumElts; ++i) {
5753 unsigned NumElts = Mask.size();
5754 KnownUndef = KnownZero = APInt::getZero(NumElts);
5756 for (unsigned i = 0; i != NumElts; ++i) {
5770 unsigned NumElts = CondVT.getVectorNumElements();
5779 Mask.resize(NumElts, SM_SentinelUndef);
5781 for (int i = 0; i != (int)NumElts; ++i) {
5788 Mask[i] += NumElts;
5814 unsigned NumElts = VT.getVectorNumElements();
5819 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
5828 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
5916 unsigned MaxElts = std::max(NumElts, NumSubSrcBCElts);
5917 assert((MaxElts % NumElts) == 0 && (MaxElts % NumSubSrcBCElts) == 0 &&
5919 InsertIdx *= (MaxElts / NumElts);
5921 NumSubElts *= (MaxElts / NumElts);
5935 if (Depth > 0 && InsertIdx == NumSubElts && NumElts == (2 * NumSubElts) &&
5944 Mask.push_back(i + NumElts);
5980 NumElts *= Scale;
5987 Mask.append(NumElts, SM_SentinelZero);
5989 for (int i = 0; i != (int)NumElts; ++i)
5995 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
6013 N.getConstantOperandAPInt(2).uge(NumElts))
6020 for (unsigned i = 0; i != NumElts; ++i)
6087 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
6088 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
6135 if ((Offset0 && isInRange(M, 0, NumElts)) ||
6136 (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
6164 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
6173 Mask.append(NumElts, SM_SentinelZero);
6207 for (int i = 0; i != (int)NumElts; ++i) {
6226 Mask.append(NumElts, 0);
6247 for (unsigned I = 0; I != NumElts; ++I)
6266 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
6365 unsigned NumElts = Op.getValueType().getVectorNumElements();
6366 APInt DemandedElts = APInt::getAllOnes(NumElts);
6504 unsigned NumElts = VT.getVectorNumElements();
6512 for (unsigned i = 0; i < NumElts; ++i) {
7289 unsigned NumElts = VT.getVectorNumElements();
7299 assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
7328 MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
7331 BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
7342 if (!Ld || (NumElts - NumUndefElts) <= 1) {
7397 if (!Ld || NumElts - NumUndefElts != 1)
7475 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
7769 unsigned NumElts = LastIdx - BaseIdx;
7774 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
7780 if (i * 2 == NumElts)
7807 if (i * 2 < NumElts) {
7819 if (i * 2 == NumElts)
7823 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
7878 unsigned NumElts = VT.getVectorNumElements();
7880 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
7882 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
7921 unsigned NumElts = VT.getVectorNumElements();
7932 for (unsigned i = 0, e = NumElts; i != e; ++i) {
8117 unsigned NumElts = VT.getVectorNumElements();
8120 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
8213 unsigned NumElts = VT.getVectorNumElements();
8214 APInt DemandedElts = APInt::getAllOnes(NumElts);
8215 for (unsigned i = 0; i != NumElts; ++i)
8220 unsigned HalfNumElts = NumElts / 2;
8261 unsigned NumElts = VT.getVectorNumElements();
8262 unsigned Half = NumElts / 2;
8269 for (unsigned i = Half, e = NumElts; i != e; ++i)
8280 isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, Half, NumElts, InVec2,
8287 isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, Half, NumElts, InVec2,
8317 if (isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, 0, NumElts, InVec0,
8320 else if (isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, 0, NumElts, InVec0,
8323 else if (isHorizontalBinOpPart(BV, ISD::FADD, DL, DAG, 0, NumElts, InVec0,
8326 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DL, DAG, 0, NumElts, InVec0,
8464 unsigned NumElts = VT.getVectorNumElements();
8468 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
8470 if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
8474 NumElts * VT.getScalarSizeInBits());
8479 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
8490 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
8951 unsigned NumElts = VT.getVectorNumElements();
8952 for (unsigned i = 0; i != NumElts; ++i)
8953 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9435 int NumElts = Mask.size();
9437 int NumLanes = NumElts / NumEltsPerLane;
9445 int Lane = (M % NumElts) / NumEltsPerLane;
9590 // TODO: Handle MaskSize != NumElts?
9594 int NumElts = VT.getVectorNumElements();
9595 if (MaskSize == NumElts) {
9597 int NumEltsPerLane = NumElts / NumLanes;
9907 unsigned NumElts = VT.getVectorNumElements();
9908 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
9910 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
9922 int NumElts = VT.getVectorNumElements();
9925 for (int i = 0; i != NumElts; i += 2) {
9960 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
9964 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10065 unsigned NumElts = Mask.size();
10073 unsigned NumSrcElts = NumElts / Scale;
10076 unsigned UpperElts = NumElts - NumSrcElts;
10167 unsigned NumElts = VT.getVectorNumElements();
10172 unsigned NumSrcElts = NumElts / Scale;
10173 unsigned UpperElts = NumElts - NumSrcElts;
10218 unsigned NumElts = VT.getVectorNumElements();
10229 unsigned NumHalfSrcElts = NumElts / Scale;
10237 unsigned UpperElts = NumElts - NumSrcElts;
10265 MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
10367 unsigned NumElts = VT.getVectorNumElements();
10413 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
10574 int NumElts = Mask.size();
10576 int NumEltsPerLane = NumElts / NumLanes;
10577 assert((NumLanes * NumEltsPerLane) == NumElts && "Value type mismatch");
10596 if (M == Elt || (0 <= M && M < NumElts &&
10597 IsElementEquivalent(NumElts, V1, V1, M, Elt))) {
10602 if (M == (Elt + NumElts) ||
10603 (NumElts <= M &&
10604 IsElementEquivalent(NumElts, V2, V2, M - NumElts, Elt))) {
10606 Mask[Elt] = Elt + NumElts;
10620 Mask[Elt] = Elt + NumElts;
10663 unsigned NumElts = VT.getVectorNumElements();
10723 MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10792 MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10849 int NumElts = Mask.size();
10851 int NumLaneElts = NumElts / NumLanes;
10858 for (int Elt = 0; Elt != NumElts; ++Elt) {
10866 if (M < NumElts && (Op.isUndef() || Op == V1))
10868 else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
10870 NormM -= NumElts;
10875 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10895 SmallVector<int, 32> PermuteMask(NumElts, -1);
10896 for (int Elt = 0; Elt != NumElts; ++Elt) {
10901 if (NumElts <= M)
10902 NormM -= NumElts;
10903 bool IsFirstOp = M < NumElts;
11053 int NumElts = VT.getVectorNumElements();
11054 int NumEltsPerLane = NumElts / NumLanes;
11061 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11066 if (M < NumElts) {
11073 M -= NumElts;
11100 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11101 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11106 if (M < NumElts)
11119 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11159 int NumElts = Mask.size();
11161 int NumEltsPerLane = NumElts / NumLanes;
11166 SmallVector<int, 32> V1Mask(NumElts, -1);
11167 SmallVector<int, 32> V2Mask(NumElts, -1);
11168 SmallVector<int, 32> FinalMask(NumElts, -1);
11169 for (int i = 0; i < NumElts; ++i) {
11171 if (M >= 0 && M < NumElts) {
11175 } else if (M >= NumElts) {
11176 V2Mask[i] = M - NumElts;
11177 FinalMask[i] = i + NumElts;
11253 V1Mask.assign(NumElts, -1);
11254 V2Mask.assign(NumElts, -1);
11255 FinalMask.assign(NumElts, -1);
11256 for (int i = 0; i != NumElts; i += NumEltsPerLane)
11259 if (M >= 0 && M < NumElts) {
11262 } else if (M >= NumElts) {
11263 V2Mask[i + (j / 2)] = M - NumElts;
11264 FinalMask[i + j] = i + (j / 2) + NumElts;
11287 unsigned NumElts = Mask.size();
11289 RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
11340 int NumElts = Mask.size();
11351 for (int i = 0; i < NumElts; ++i) {
11353 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11359 int StartIdx = i - (M % NumElts);
11367 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11376 SDValue MaskV = M < NumElts ? V1 : V2;
11440 int NumElts = RepeatedMask.size();
11441 int Scale = 16 / NumElts;
11524 unsigned NumElts = Mask.size();
11527 assert((ZeroLo + ZeroHi) < NumElts && "Zeroable shuffle detected");
11532 SDValue Src = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11533 int Low = Mask[ZeroLo] < (int)NumElts ? 0 : NumElts;
11534 if (isSequentialOrUndefInRange(Mask, ZeroLo, NumElts - ZeroLo, Low))
11537 DAG.getTargetConstant(NumElts - ZeroLo, DL, MVT::i8));
11541 SDValue Src = Mask[0] < (int)NumElts ? V1 : V2;
11542 int Low = Mask[0] < (int)NumElts ? 0 : NumElts;
11543 if (isSequentialOrUndefInRange(Mask, 0, NumElts - ZeroHi, Low + ZeroHi))
11568 unsigned NumElts = Mask.size();
11569 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11575 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11576 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11579 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11588 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11594 unsigned Shift = Mask[ZeroLo] % NumElts;
11603 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11606 Shift += Mask[ZeroLo] % NumElts;
12222 unsigned NumElts = VT.getVectorNumElements();
12271 SmallVector<APInt> Bits(NumElts, APInt::getAllOnes(EltBits));
12323 if (VT.isFloatingPoint() || NumElts <= 4) {
12453 unsigned NumElts = VT.getVectorNumElements();
12457 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12459 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12465 if (NumElts == 4 &&
12470 NewMask.append(NumElts, -1);
14116 int NumElts = VT.getVectorNumElements();
14120 if (NumElts <= M)
14121 M += (Scale - 1) * NumElts;
14761 int NumElts = VT.getVectorNumElements();
14763 int NumEltsPerLane = NumElts / NumLanes;
14773 int NumEltsPerSublane = NumElts / NumSublanes;
14776 SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
14780 for (int i = 0; i != NumElts; ++i) {
15068 int NumElts = Mask.size();
15097 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
15162 if (RepeatMask[i] < NumElts) {
15167 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15177 SmallVector<int, 16> NewMask(NumElts, -1);
15212 for (int i = 0; i != NumElts; ++i) {
15438 int NumElts = VT.getVectorNumElements();
15440 int NumLaneElts = NumElts / NumLanes;
15454 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15460 if (0 != ((M % NumElts) / NumLaneElts))
15469 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15477 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15478 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15524 int Lane = (M % NumElts) / NumLaneElts;
15528 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15578 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15592 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15593 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15638 int NumElts = VT.getVectorNumElements();
15640 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15642 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15646 for (int i = 0; i < NumElts; ++i)
15654 for (int i = 0; i < NumElts; ++i) {
15659 int Val = (i & 6) + NumElts * (i & 1);
15660 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15774 int NumElts = VT.getVectorNumElements();
15775 size_t FirstQtr = NumElts / 2;
15776 size_t ThirdQtr = NumElts + NumElts / 2;
15777 bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
15796 if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
15801 IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
16575 int NumElts = VT.getVectorNumElements();
16576 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16578 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16661 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16662 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
17210 int NumElts = Mask.size();
17211 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17213 if (NumV2Elements == 1 && Mask[0] >= NumElts)
17283 int NumElts = Mask.size();
17284 for (int i = 0; i != NumElts; ++i) {
17286 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17357 int NumElts = Mask.size();
17358 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17363 for (int i = 0; i != NumElts; ++i) {
17368 Src = Mask[i] / NumElts;
17369 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17375 assert(SubvecElts != NumElts && "Identity shuffle?");
17382 if ((int)Zeroable.countl_one() >= (NumElts - SubvecElts)) {
17411 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17413 ShiftAmt += WideElts - NumElts;
17421 Offset += NumElts; // Increment for next iteration.
17815 unsigned NumElts = VT.getVectorNumElements();
17826 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17841 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17880 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17956 unsigned NumElts = VecVT.getVectorNumElements();
17959 if (NumElts == 1) {
17964 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17965 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17989 unsigned NumElts = VT.getVectorNumElements();
17990 APInt DemandedElts = APInt::getZero(NumElts);
18009 DemandedElts |= APIntOps::ScaleBitMask(DemandedSrcElts, NumElts);
18188 unsigned NumElts = VecVT.getVectorNumElements();
18189 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18190 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18206 unsigned NumElts = VT.getVectorNumElements();
18236 MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
18245 for (unsigned I = 0; I != NumElts; ++I)
18254 if (N2C->getAPIntValue().uge(NumElts))
18269 SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
18279 for (unsigned i = 0; i != NumElts; ++i)
18280 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18317 for (unsigned i = 0; i != NumElts; ++i)
18318 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
19040 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
19041 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
19042 MVT VecVT = MVT::getVectorVT(VT, NumElts);
20232 unsigned NumElts = VT.getVectorNumElements();
20246 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20249 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20255 NumElts *= 512 / ExtVT.getSizeInBits();
20256 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20260 NumElts);
20270 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20645 unsigned NumElts = InVT.getVectorNumElements();
20646 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20656 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20678 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20679 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
22260 unsigned NumElts = VT.getVectorNumElements();
22261 APInt EltCount = APInt::getZero(NumElts);
23165 unsigned NumElts = VT.getVectorNumElements();
23168 for (unsigned i = 0; i < NumElts; ++i) {
24360 unsigned NumElts = VT.getVectorNumElements();
24366 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
24369 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
24375 NumElts *= 512 / ExtVT.getSizeInBits();
24376 InVT = MVT::getVectorVT(MVT::i1, NumElts);
24379 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
24395 WideVT = MVT::getVectorVT(VTElt, NumElts);
24446 unsigned NumElts = VT.getVectorNumElements();
24453 int InSize = InSVT.getSizeInBits() * NumElts;
24464 if (InVT.getVectorNumElements() != NumElts)
24499 APInt DemandedElts = APInt::getLowBitsSet(InNumElts, NumElts);
24501 unsigned Scale = InNumElts / NumElts;
24503 for (unsigned I = 0; I != NumElts; ++I)
24677 unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
24678 assert(NumElts <= 8 && "Unexpected VT");
24690 if (NumElts < 8)
24692 StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
27954 int NumElts = VT.getVectorNumElements();
27955 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
28474 unsigned NumElts = VT.getVectorNumElements();
28476 unsigned NumEltsPerLane = NumElts / NumLanes;
28488 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28529 for (unsigned i = 0; i != NumElts; i += 16) {
28633 unsigned NumElts = VT.getVectorNumElements();
28647 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28663 for (unsigned i = 0; i != NumElts; i += 16) {
28712 unsigned NumElts = VT.getVectorNumElements();
28744 DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
28747 DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
28751 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
28766 SmallVector<int, 16> ShufMask(NumElts);
28767 for (int i = 0; i != (int)NumElts; ++i)
28768 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
28801 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28861 unsigned NumElts = VT.getVectorNumElements();
28862 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29276 unsigned NumElts = VT.getVectorNumElements();
29277 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29364 unsigned NumElts = VT.getVectorNumElements();
29365 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29380 SmallVector<int, 64>(NumElts, 0));
29538 unsigned NumElts = VT.getVectorNumElements();
29540 for (unsigned i = 0; i != NumElts; ++i) {
29552 ShuffleMask.push_back(i + NumElts);
29560 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
29704 int NumElts = VT.getVectorNumElements();
29709 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29725 for (int i = 0; i != NumElts; i += 16) {
29732 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
29964 unsigned NumElts = VT.getVectorNumElements();
29994 MVT WideVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
30035 MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
30068 MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
30160 int NumElts = VT.getVectorNumElements();
30264 MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
30314 MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
31277 int NumElts = VT.getVectorNumElements();
31297 for (int i = 0; i < NumElts; ++i)
31465 int NumElts = VT.getVectorNumElements();
31480 for (int i = 0; i != NumElts; ++i) {
31542 unsigned NumElts = VT.getVectorNumElements();
31570 for (unsigned i = 0; i < NumElts; ++i) {
32003 unsigned NumElts = VT.getVectorNumElements() * Factor;
32005 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
32006 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
32007 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
32156 unsigned NumElts = VT.getVectorNumElements() * Factor;
32158 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
32159 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
32160 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
32875 unsigned NumElts = InVT.getVectorNumElements();
32876 unsigned HalfNumElts = NumElts / 2;
32877 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
33084 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
33087 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
33088 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
33091 if (NumElts != SrcElts) {
37203 unsigned NumElts = DemandedElts.getBitWidth();
37478 if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
37493 if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
37500 if (DemandedElts.countr_zero() >= (NumElts / 2))
37512 for (unsigned I = 0; I != NumElts; ++I) {
37587 unsigned NumElts = VT.getVectorNumElements();
37588 if (Mask.size() == NumElts) {
37589 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
37591 for (unsigned i = 0; i != NumElts; ++i) {
37605 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37608 unsigned OpIdx = (unsigned)M / NumElts;
37609 unsigned EltIdx = (unsigned)M % NumElts;
37756 unsigned NumElts = VT.getVectorNumElements();
37757 if (Mask.size() == NumElts) {
37758 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
37759 for (unsigned i = 0; i != NumElts; ++i) {
37771 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37774 unsigned OpIdx = (unsigned)M / NumElts;
37775 unsigned EltIdx = (unsigned)M % NumElts;
38276 unsigned NumElts = V.getValueType().getVectorNumElements();
38277 KnownBits Known(NumElts);
38278 for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
38279 APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
38577 unsigned NumElts = VT1.getVectorNumElements();
38578 if (Mask.size() <= NumElts &&
38579 scaleShuffleElements(Mask, NumElts, ScaledMask)) {
38580 for (unsigned i = 0; i != NumElts; ++i)
39400 int NumElts = VT0.getVectorNumElements();
39402 int NumEltsPerLane = NumElts / NumLanes;
39502 if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
39505 M -= NumElts + (SubLane * NumHalfEltsPerLane);
39513 for (int i = 0; i != NumElts; ++i) {
39517 if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
39520 if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
39772 unsigned NumElts = VT.getVectorNumElements();
39774 OpMask.assign(NumElts, SM_SentinelUndef);
39776 OpZero = OpUndef = APInt::getZero(NumElts);
40384 unsigned NumElts = VT.getVectorNumElements();
40395 !scaleShuffleElements(Mask0, NumElts, ScaledMask0) ||
40396 !scaleShuffleElements(Mask1, NumElts, ScaledMask1))
40402 if (!getShuffleDemandedElts(NumElts, BlendMask, DemandedElts, Demanded0,
40405 !getShuffleDemandedElts(NumElts, ScaledMask0, Demanded0, DemandedLHS0,
40407 !getShuffleDemandedElts(NumElts, ScaledMask1, Demanded1, DemandedLHS1,
40419 SmallVector<int, 32> NewBlendMask(NumElts, SM_SentinelUndef);
40420 SmallVector<int, 32> NewPermuteMask(NumElts, SM_SentinelUndef);
40421 for (unsigned I = 0; I != NumElts; ++I) {
40433 assert(isUndefOrEqual(NewBlendMask[M], M + NumElts) &&
40435 NewBlendMask[M] = M + NumElts;
40441 assert(isUndefOrInRange(NewPermuteMask, 0, NumElts) && "Bad permute");
40997 unsigned NumElts = VT.getVectorNumElements();
40999 SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
41201 unsigned NumElts = Src.getValueType().getVectorNumElements();
41204 Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
41708 int NumElts = VT.getVectorNumElements();
41712 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
41819 unsigned NumElts = DemandedElts.getBitWidth();
41852 if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
41854 unsigned Scale = NumCstElts / NumElts;
41885 int NumElts = DemandedElts.getBitWidth();
41913 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
42049 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42059 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
42088 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42098 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
42132 int NumElts = VT.getVectorNumElements();
42140 for (int I = 0; I != NumElts; ++I) {
42280 KnownZero = SrcZero.zextOrTrunc(NumElts);
42281 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
42286 DecodeBLENDMask(NumElts, Op.getConstantOperandVal(2), BlendMask);
42385 DemandedElts.lshr(NumElts / 2) == 0) {
42390 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
42472 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
42490 unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
42571 if (OpMask.size() != (unsigned)NumElts ||
42583 for (int i = 0; i != NumElts; ++i)
42587 if (isUndefInRange(OpMask, 0, NumElts)) {
42591 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
42597 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
42606 int Lo = Src * NumElts;
42607 APInt SrcElts = APInt::getZero(NumElts);
42608 for (int i = 0; i != NumElts; ++i)
42611 if (0 <= M && M < NumElts)
42632 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
42633 for (int i = 0; i != NumElts; ++i)
43008 unsigned NumElts = SrcVT.getVectorNumElements();
43011 if (OriginalDemandedBits.countr_zero() >= NumElts)
43016 OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
43023 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
43029 Known.Zero.setHighBits(BitWidth - NumElts);
43039 Known.One.setLowBits(NumElts);
43041 Known.Zero.setLowBits(NumElts);
43166 int NumElts = DemandedElts.getBitWidth();
43245 if (ShuffleMask.size() == (unsigned)NumElts &&
43257 for (int i = 0; i != NumElts; ++i) {
43261 int OpIdx = M / NumElts;
43262 int EltIdx = M % NumElts;
43286 unsigned NumElts = DemandedElts.getBitWidth();
43295 APInt::getZero(NumElts));
43301 assert(0 <= M.value() && M.value() < (int)(Ops.size() * NumElts) &&
43303 DemandedSrcElts[M.value() / NumElts].setBit(M.value() % NumElts);
43347 unsigned NumElts = DemandedElts.getBitWidth();
43353 UndefElts = APInt::getZero(NumElts);
43695 unsigned NumElts = BV->getNumOperands();
43729 if (NumElts == 8)
43737 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
43744 Ops.append(NumElts, Splat);
43746 for (unsigned i = 0; i != NumElts; ++i)
44062 unsigned NumElts = VT.getVectorNumElements();
44076 if (MovMskElts <= NumElts &&
44089 unsigned NumConcats = NumElts / MovMskElts;
44381 unsigned NumElts = MatchVT.getVectorNumElements();
44388 if (NumElts > 64 || !isPowerOf2_32(NumElts))
44408 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
44412 while (NumElts > MaxElts) {
44416 NumElts /= 2;
44418 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
44423 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
44459 NumElts = MaskSrcVT.getVectorNumElements();
44461 assert((NumElts <= 32 || NumElts == 64) &&
44464 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
44478 // all_of -> MOVMSK == ((1 << NumElts) - 1)
44479 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
45012 unsigned NumElts = VecVT.getVectorNumElements();
45035 if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
45038 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
45053 if (NumElts >= 8)
45077 if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
45104 if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
45137 if (NumElts > 8) {
45159 unsigned NumElts = VecVT.getVectorNumElements();
45160 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
45389 unsigned NumElts = VT.getVectorNumElements();
45390 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
45393 if (NumElts > EltSizeInBits) {
45398 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
45399 unsigned Scale = NumElts / EltSizeInBits;
45407 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
45413 assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
45414 unsigned Scale = EltSizeInBits / NumElts;
45416 EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
45418 ShuffleMask.append(NumElts * Scale, 0);
45427 ShuffleMask.append(NumElts, 0);
45433 for (unsigned i = 0; i != NumElts; ++i) {
45442 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
45910 int NumElts = VT.getVectorNumElements();
45911 for (int i = 0; i != NumElts; ++i) {
45914 if (CondMask[i] < NumElts) {
47014 unsigned NumElts = VecVT.getVectorNumElements();
47019 NumElts <= CmpBits && CmpVal.isMask(NumElts);
47037 if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
47057 if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
47063 APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
47082 if (BC.getValueType().getVectorNumElements() <= NumElts) {
47138 Result.getValueType().getVectorNumElements() <= NumElts) {
47176 if (NumElts <= CmpBits &&
47181 canScaleShuffleElements(ShuffleMask, NumElts)) {
47195 if (NumElts <= CmpBits && Subtarget.hasAVX() &&
47200 MVT FloatVT = MVT::getVectorVT(FloatSVT, NumElts);
47575 unsigned NumElts = VT.getVectorNumElements();
47576 if ((NumElts % 2) != 0)
47579 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
47593 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
47603 SmallVector<int, 16> ShuffleMask(NumElts);
47604 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47606 ShuffleMask[2 * i + 1] = i + NumElts;
47612 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47613 ShuffleMask[2 * i] = i + NumElts / 2;
47614 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
47728 unsigned NumElts = VT.getVectorNumElements();
47729 if (NumElts == 1 || !isPowerOf2_32(NumElts))
47733 if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
49249 unsigned NumElts = 128 / N00Type.getSizeInBits();
49250 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
49251 EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
49887 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49888 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49893 assert(SrcPartials[0].getBitWidth() == NumElts &&
50028 int NumElts = VT.getVectorNumElements();
50031 APInt DemandedElts = APInt::getAllOnes(NumElts);
50036 for (int I = 0; I != NumElts; ++I) {
50087 unsigned NumElts = SrcVecVT.getVectorNumElements();
50092 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
50706 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
50707 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
50712 assert(SrcPartials[0].getBitWidth() == NumElts &&
50779 unsigned NumElts = VT.getVectorNumElements();
50780 unsigned HalfElts = NumElts / 2;
50781 APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
50782 if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
50790 if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
50811 int NumElts = VT.getVectorNumElements();
50816 APInt DemandedElts = APInt::getZero(NumElts);
50817 for (int I = 0; I != NumElts; ++I)
51262 unsigned NumElts = RegVT.getVectorNumElements();
51263 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
51333 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
51334 for (unsigned i = 0; i < NumElts; ++i) {
51438 unsigned NumElts = VT.getVectorNumElements();
51441 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
51930 unsigned NumElts = VT.getVectorNumElements();
51950 scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
51956 scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
51958 ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
51986 for (unsigned i = 0; i != NumElts; ++i)
51992 for (unsigned i = 0; i != NumElts; ++i)
51997 if (isUndefOrInRange(LMask, 0, NumElts))
51999 else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
52002 if (isUndefOrInRange(RMask, 0, NumElts))
52004 else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
52018 PostShuffleMask.append(NumElts, SM_SentinelUndef);
52027 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
52031 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
52036 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
52037 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
52050 ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
52055 if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
52065 isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
53145 unsigned NumElts = SrcVT.getVectorNumElements();
53146 SmallVector<int, 32> ReverseMask(NumElts);
53147 for (unsigned I = 0; I != NumElts; ++I)
53148 ReverseMask[I] = (NumElts - 1) - I;
53467 int NumElts = VT.getVectorNumElements();
53509 for (int I = 0; I != NumElts; ++I)
53540 APInt DemandedElts = APInt::getAllOnes(NumElts);
53545 for (int I = 0; I != NumElts; ++I) {
54500 unsigned NumElts = SrcVT.getVectorNumElements();
54502 assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
54511 for (unsigned Idx = 0; Idx != NumElts; ++Idx)
54528 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
54540 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
54564 ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
54588 for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
54892 unsigned NumElts = VecWidth / DestWidth;
54893 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
55675 unsigned NumElts = VT.getVectorNumElements();
55678 APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
55694 for (int i = 0; i != (int)NumElts; ++i) {
55696 Mask.push_back(2 * (i + NumElts));
56016 unsigned NumElts = VT.getVectorNumElements();
56033 SmallVector<APInt> Results(NumElts);
56034 for (unsigned I = 0; I != NumElts; ++I) {
57430 unsigned NumElts = VT.getVectorNumElements();
57433 SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
57434 for (unsigned I = 0; I != NumElts; ++I)
57534 unsigned NumElts = VT.getVectorNumElements();
57535 if (NumElts == 1 || !isPowerOf2_32(NumElts))
57543 if (NumElts < 8) {
57544 unsigned NumConcats = 8 / NumElts;
57545 SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
57554 std::max(4U, NumElts));
57564 if (NumElts < 4) {
57565 assert(NumElts == 2 && "Unexpected size");
57640 unsigned NumElts = VT.getVectorNumElements();
57646 if (NumElts == 8 && Src.getOpcode() == ISD::CONCAT_VECTORS &&
57679 if (NumElts == 1 || !isPowerOf2_32(NumElts))
57683 if (NumElts < 4)
57689 EVT::getVectorVT(*DAG.getContext(), MVT::i16, std::max(8U, NumElts));
57700 if (NumElts < 8) {
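Taken together, the matches above repeat one idiom: read the element count from a vector value type (VT.getVectorNumElements()) or from a demanded-elements mask (DemandedElts.getBitWidth()), then use NumElts to size shuffle masks, APInt element sets, and derived vector types (MVT::getVectorVT). The sketch below illustrates that idiom in isolation; it is not taken from the file itself, and the function name, parameters, and blend index are hypothetical stand-ins.

// Illustrative sketch only (not a line from the listing): the recurring
// NumElts idiom, written against LLVM's SelectionDAG/MVT APIs. VT, Vec,
// EltVec, IdxVal and DL are hypothetical values a real caller would supply.
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

static SDValue blendInSingleElement(SelectionDAG &DAG, const SDLoc &DL, MVT VT,
                                    SDValue Vec, SDValue EltVec,
                                    unsigned IdxVal) {
  // The element count drives all per-element bookkeeping below.
  unsigned NumElts = VT.getVectorNumElements();

  // NumElts sizes APInt element masks (compare the APInt::getZero /
  // APInt::getAllOnes matches above).
  APInt DemandedElts = APInt::getAllOnes(NumElts);
  (void)DemandedElts;

  // NumElts also sizes shuffle masks; mask indices >= NumElts select from the
  // second shuffle operand, so lane IdxVal is taken from EltVec here.
  SmallVector<int, 16> BlendMask;
  for (unsigned i = 0; i != NumElts; ++i)
    BlendMask.push_back(i == IdxVal ? i + NumElts : i);

  return DAG.getVectorShuffle(VT, DL, Vec, EltVec, BlendMask);
}

The mask-building loop mirrors the matches at source lines 18279-18280 and 18317-18318 above; the same shape recurs wherever the listing builds BlendMask, ShufMask, or PermuteMask values.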