Lines Matching +full:1 +full:v8

103 Cost += 1;
146 if (!isa<ConstantInt>(BO->getOperand(1)))
149 unsigned ShAmt = cast<ConstantInt>(BO->getOperand(1))->getZExtValue();
188 if (Idx == 1 || !Inst)
214 if (Inst && Idx == 1 && Imm.getBitWidth() <= ST->getXLen() &&
234 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2())
244 ImmArgIdx = 1;
319 llvm::bit_floor(std::clamp<unsigned>(RVVRegisterWidthLMUL, 1, 8));
382 // vwaddu.vv v10, v8, v9
383 // li a0, -1 (ignored)
388 if (Mask[0] == 0 || Mask[0] == 1) {
391 // vnsrl.wi v10, v8, 0
400 if (LT.second.isFixedLengthVector() && LT.first == 1 &&
415 if (LT.second.isFixedLengthVector() && LT.first == 1 &&
435 if (!Mask.empty() && LT.first.isValid() && LT.first != 1 &&
454 I == NumRegs - 1 ? Mask.size() % SubVF : SubVF),
458 return (SingleSubVector ? 0 : 1) * SubVF + I % VF;
501 // vslidedown.vi v8, v9, 2
507 // vslideup.vi v8, v9, 2
515 // vmerge.vvm v8, v9, v8, v0
521 (1 + getRISCVInstructionCost({RISCV::VMV_S_X, RISCV::VMERGE_VVM},
527 if (LT.second.getScalarSizeInBits() == 1) {
530 // andi a0, a0, 1
532 // vmv.v.x v8, a0
533 // vmsne.vi v0, v8, 0
535 (1 + getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
540 // vmv.v.i v8, 0
541 // vmerge.vim v8, v8, 1, v0
542 // vmv.x.s a0, v8
543 // andi a0, a0, 1
544 // vmv.v.x v8, a0
545 // vmsne.vi v0, v8, 0
548 (1 + getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
556 // vmv.v.x v8, a0
562 // vrgather.vi v9, v8, 0
574 Opcodes[1] = RISCV::VSLIDEUP_VI;
587 // addi a0, a0, -1
591 // vrgather.vv v9, v8, v10
595 LenCost = isInt<5>(LT.second.getVectorNumElements() - 1) ? 0 : 1;
598 isInt<5>(LT.second.getVectorNumElements() - 1))
599 Opcodes[1] = RISCV::VRSUB_VI;
603 InstructionCost ExtendCost = Tp->getElementType()->isIntegerTy(1) ? 3 : 0;
778 {Intrinsic::lrint, MVT::i32, 1},
779 {Intrinsic::lrint, MVT::i64, 1},
780 {Intrinsic::llrint, MVT::i64, 1},
901 // vrsub.vi v10, v8, 0
902 // vmax.vv v8, v8, v10
913 // vid.v v8 // considered hoisted
914 // vsaddu.vx v8, v8, a0
915 // vmsltu.vx v0, v8, a1
926 // (LT.first - 1) vector adds.
929 (LT.first - 1) *
931 return 1 + (LT.first - 1);
942 // cmp + select instructions to convert -1 to EVL.
944 if (ICA.getArgs().size() > 1 &&
945 cast<ConstantInt>(ICA.getArgs()[1])->isZero())
1053 if (SrcEltSize == 1) {
1056 // vmv.v.i v8, 0
1057 // vmerge.vim v8, v8, -1, v0
1061 if ((PowDiff < 1) || (PowDiff > 3))
1066 (ISD == ISD::SIGN_EXTEND) ? SExtOp[PowDiff - 1] : ZExtOp[PowDiff - 1];
1070 if (Dst->getScalarSizeInBits() == 1) {
1074 // vand.vi v8, v8, 1
1075 // vmsne.vi v0, v8, 0
1096 (DstEltSize > SrcEltSize) ? DstEltSize >> 1 : DstEltSize << 1;
1105 if (Src->getScalarSizeInBits() == 1 || Dst->getScalarSizeInBits() == 1) {
1109 // vmv.v.i v8, 0
1110 // vmerge.vim v8, v8, -1, v0
1111 // vfcvt.f.x.v v8, v8
1114 // vfncvt.rtz.x.f.w v9, v8
1115 // vand.vi v8, v9, 1
1116 // vmsne.vi v0, v8, 0
1119 if (std::abs(PowDiff) <= 1)
1120 return 1;
1153 if (Ty->getElementType()->isIntegerTy(1)) {
1179 ExtraCost = 1 +
1198 ExtraCost = 1 +
1241 (LT.first > 1) ? (LT.first - 1) *
1268 if (ElementTy->isIntegerTy(1)) {
1272 // vmnot.m v8, v0
1273 // vcpop.m a0, v8
1276 return (LT.first - 1) +
1286 return (LT.first - 1) +
1326 (LT.first > 1) ? (LT.first - 1) *
1354 return (LT.first - 1) +
1372 return 1;
1427 if (ValTy->getScalarSizeInBits() == 1) {
1428 // vmandn.mm v8, v8, v9
1430 // vmor.mm v0, v9, v8
1441 if (ValTy->getScalarSizeInBits() == 1) {
1444 // vmandn.mm v8, v8, v9
1446 // vmor.mm v0, v9, v8
1458 // vmerge.vvm v8, v9, v8, v0
1530 !isa<ConstantData>(U->getOperand(1)) &&
1545 return Opcode == Instruction::PHI ? 0 : 1;
1567 if (Index != -1U)
1593 if (Val->getScalarSizeInBits() == 1) {
1619 unsigned BaseCost = 1;
1620 // For insertelement we must add 1 to the index to form the input of vslideup.
1621 unsigned SlideCost = Opcode == Instruction::InsertElement ? 2 : 1;
1623 if (Index != -1U) {
1635 SlideCost = 1; // With a constant index, we do not need to use addi.
1642 // vsetivli zero, 1, e64, m1, ta, mu (not counted)
1643 // vslidedown.vx v8, v8, a0
1644 // vmv.x.s a0, v8
1646 // vsrl.vx v8, v8, a1
1647 // vmv.x.s a1, v8
1654 // addi a0, a2, 1
1656 // vslideup.vx v8, v12, a2
1709 ConstantMatCost += getConstantMatCost(1, Op2Info);
1725 Op = (Ty->getScalarSizeInBits() == 1) ? RISCV::VMAND_MM : RISCV::VAND_VV;
1929 // vectorization by returning 1.
1930 return std::max<unsigned>(1U, RegWidth.getFixedValue() / ElemWidth);
1935 // The RISC-V-specific part here is giving "instruction number" first priority.
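
Taken together, these hits trace one recurring shape in the cost model: a vector operation is priced as a short fixed sequence of RVV instructions, plus (LT.first - 1) extra operations when the legalized type spans several vector registers (see hits 521, 535, 548, 931, 1276, 1354). Below is a minimal standalone C++ sketch of that shape, under stated assumptions: the names Opcode, instructionCost, and sequenceCost are hypothetical stand-ins for illustration, not the LLVM API (the real code uses InstructionCost, RISCV::* opcode enums, and RISCVTTIImpl::getRISCVInstructionCost).

#include <cstdio>
#include <initializer_list>

// Hypothetical stand-ins for illustration only; not LLVM's types or opcodes.
enum Opcode { VMV_V_X, VMSNE_VI, VMERGE_VVM };

unsigned instructionCost(Opcode) { return 1; } // assume unit cost per op

// Cost a fixed RVV sequence once, then charge one extra operation per
// additional vector register the legalized type occupies (NumRegs - 1),
// mirroring expressions like `(LT.first - 1) + getRISCVInstructionCost(...)`.
unsigned sequenceCost(std::initializer_list<Opcode> Seq, unsigned NumRegs) {
  unsigned Cost = 0;
  for (Opcode Op : Seq)
    Cost += instructionCost(Op);
  return Cost + (NumRegs - 1);
}

int main() {
  // e.g. the i1 broadcast at hits 530-535 (andi + vmv.v.x + vmsne.vi) is
  // modeled there as 1 + cost({VMV_V_X, VMSNE_VI}) for a single register.
  std::printf("cost = %u\n", 1 + sequenceCost({VMV_V_X, VMSNE_VI}, 1));
}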