| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/MCTargetDesc/ |
| H A D | AArch64MCCodeEmitter.cpp |
    267  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());  in getAddSubImmOpValue() local
    268  assert((ShiftVal == 0 || ShiftVal == 12) &&  in getAddSubImmOpValue()
    271  return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));  in getAddSubImmOpValue()
    288  ShiftVal = 12;  in getAddSubImmOpValue()
    290  return ShiftVal == 0 ? 0 : (1 << ShiftVal);  in getAddSubImmOpValue()
    528  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);  in getImm8OptLsl() local
    529  assert((ShiftVal == 0 || ShiftVal == 8) &&  in getImm8OptLsl()
    534  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));  in getImm8OptLsl()
    555  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());  in getMoveVecShifterOpValue() local
    556  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");  in getMoveVecShifterOpValue()
    [all …]
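The getAddSubImmOpValue() hits above encode an AArch64 ADD/SUB immediate: a 12-bit value plus an optional LSL #12, reported by ORing bit 12 into the result. A minimal stand-alone sketch of that encoding idea (the helper name is mine, not the LLVM API):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-alone helper mirroring the pattern above: an AArch64
    // ADD/SUB immediate is a 12-bit value plus an optional "LSL #12" flag, so
    // the encoder ORs the immediate with bit 12 when the shifted form is used.
    uint32_t encodeAddSubImm(uint32_t Imm12, unsigned ShiftVal) {
      assert(Imm12 < (1u << 12) && "immediate must fit in 12 bits");
      assert((ShiftVal == 0 || ShiftVal == 12) && "only LSL #0 / LSL #12 exist");
      return Imm12 | (ShiftVal == 0 ? 0 : (1u << ShiftVal)); // bit 12 = shift flag
    }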
|
| H A D | AArch64InstPrinter.cpp |
    1009  unsigned ShiftVal = AArch64_AM::getArithShiftValue(Val);  in printArithExtend() local
    1021  if (ShiftVal != 0)  in printArithExtend()
    1022  O << ", lsl #" << ShiftVal;  in printArithExtend()
    1027  if (ShiftVal != 0)  in printArithExtend()
    1028  O << " #" << ShiftVal;  in printArithExtend()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/RISCV/MCTargetDesc/ |
| H A D | RISCVMatInt.cpp |
    121  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {  in getIntMatCost() local
    122  APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);  in getIntMatCost()
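getIntMatCost() walks the constant in platform-register-sized chunks, sign-extending each chunk with ashr + sextOrTrunc and summing a per-chunk cost; the AArch64 and X86 getIntImmCost() entries further down use the same 64-bit chunking loop. A rough stand-alone sketch of the pattern, assuming a 64-bit constant, 32-bit chunks, and a made-up per-chunk cost:

    #include <cstdint>

    // Rough sketch of the chunking loop above (not the LLVM APInt code):
    // walk a 64-bit constant in 32-bit register-sized chunks, keep each
    // chunk's sign the way ashr(ShiftVal).sextOrTrunc(PlatRegSize) does,
    // and charge a hypothetical cost per non-zero chunk.
    int approxIntMatCost(int64_t Val) {
      const unsigned PlatRegSize = 32; // assumed RV32-style register width
      int Cost = 0;
      for (unsigned ShiftVal = 0; ShiftVal < 64; ShiftVal += PlatRegSize) {
        int32_t Chunk = static_cast<int32_t>(Val >> ShiftVal); // ashr + trunc
        Cost += (Chunk == 0) ? 0 : 1;                          // stand-in cost
      }
      return Cost;
    }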
|
| /netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/GlobalISel/ |
| H A D | CombinerHelper.h |
    257  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
    258  bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
    269  unsigned &ShiftVal);
    270  bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/GlobalISel/ |
| H A D | CombinerHelper.cpp |
    1820  auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {  in matchShiftOfShiftedLogic() argument
    1832  ShiftVal = MaybeImmVal->Value.getSExtValue();  in matchShiftOfShiftedLogic()
    1898  unsigned &ShiftVal) {  in matchCombineMulToShl() argument
    1905  ShiftVal = MaybeImmVal->Value.exactLogBase2();  in matchCombineMulToShl()
    1906  return (static_cast<int32_t>(ShiftVal) != -1);  in matchCombineMulToShl()
    1910  unsigned &ShiftVal) {  in applyCombineMulToShl() argument
    1914  auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);  in applyCombineMulToShl()
    2172  unsigned &ShiftVal) {  in matchCombineShiftToUnmerge() argument
    2191  ShiftVal = MaybeImmVal->Value.getSExtValue();  in matchCombineShiftToUnmerge()
    2192  return ShiftVal >= Size / 2 && ShiftVal < Size;  in matchCombineShiftToUnmerge()
    [all …]
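matchCombineMulToShl() above relies on APInt::exactLogBase2(), which is -1 unless the multiplier is an exact power of two; such a multiply is then rewritten as a left shift. A self-contained sketch of that test using plain integers instead of the GlobalISel machinery:

    #include <cstdint>
    #include <optional>

    // Stand-alone sketch of the mul-to-shl idea: x * C == x << log2(C) when C
    // is a non-zero power of two. LLVM's exactLogBase2() reports -1 otherwise;
    // std::optional models that here.
    std::optional<unsigned> matchMulToShl(uint64_t MulAmt) {
      if (MulAmt == 0 || (MulAmt & (MulAmt - 1)) != 0)
        return std::nullopt;          // not an exact power of two
      unsigned ShiftVal = 0;
      while ((MulAmt >>= 1) != 0)
        ++ShiftVal;                   // log2 of the constant
      return ShiftVal;
    }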
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/ |
| H A D | AArch64FastISel.cpp |
    1234  uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();  in emitAddSub() local
    1239  ShiftVal, SetFlags, WantResult);  in emitAddSub()
    1256  uint64_t ShiftVal = C->getZExtValue();  in emitAddSub() local
    1262  ShiftVal, SetFlags, WantResult);  in emitAddSub()
    1603  uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();  in emitLogicalOp() local
    1608  ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);  in emitLogicalOp()
    1618  uint64_t ShiftVal = C->getZExtValue();  in emitLogicalOp() local
    1622  ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);  in emitLogicalOp()
    4577  uint64_t ShiftVal = C->getValue().logBase2();  in selectMul() local
    4605  emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt);  in selectMul()
    [all …]
|
| H A D | AArch64TargetTransformInfo.cpp |
    79  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {  in getIntImmCost() local
    80  APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);  in getIntImmCost()
|
| H A D | AArch64ISelDAGToDAG.cpp |
    481  unsigned ShiftVal = CSD->getZExtValue();  in isWorthFoldingSHL() local
    482  if (ShiftVal > 3)  in isWorthFoldingSHL()
    767  unsigned ShiftVal = 0;  in SelectArithExtendedRegister() local
    774  ShiftVal = CSD->getZExtValue();  in SelectArithExtendedRegister()
    775  if (ShiftVal > 4)  in SelectArithExtendedRegister()
    803  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),  in SelectArithExtendedRegister()
    1017  unsigned ShiftVal = CSD->getZExtValue();  in SelectExtendedSHL() local
    1019  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)  in SelectExtendedSHL()
|
| H A D | AArch64InstrInfo.cpp |
    895  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);  in isFalkorShiftExtFast() local
    896  if (ShiftVal == 0)  in isFalkorShiftExtFast()
    898  return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;  in isFalkorShiftExtFast()
    922  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);  in isFalkorShiftExtFast() local
    923  return ShiftVal == 0 ||  in isFalkorShiftExtFast()
    924  (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);  in isFalkorShiftExtFast()
    930  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);  in isFalkorShiftExtFast() local
    931  return ShiftVal == 0 ||  in isFalkorShiftExtFast()
    932  (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);  in isFalkorShiftExtFast()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/ARM/ |
| H A D | ARMParallelDSP.cpp |
    792  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());  in CreateWideLoad() local
    793  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);  in CreateWideLoad()
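CreateWideLoad() replaces two adjacent narrow loads with one wide load and recovers the upper half with an lshr by the narrow bit width. A small sketch of that split, assuming little-endian layout and 16-bit halves (not the ARM pass itself):

    #include <cstdint>

    // Sketch of the widened-load split: one 32-bit load covers two adjacent
    // 16-bit values; the bottom half is a truncate, the top half an lshr by
    // the narrow width, assuming little-endian memory order.
    void splitWideLoad(uint32_t WideLoad, uint16_t &Bottom, uint16_t &Top) {
      const unsigned ShiftVal = 16;                        // narrow bit width
      Bottom = static_cast<uint16_t>(WideLoad);            // trunc
      Top = static_cast<uint16_t>(WideLoad >> ShiftVal);   // lshr + trunc
    }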
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/WebAssembly/ |
| H A D | WebAssemblyISelLowering.cpp |
    2010  auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));  in LowerShift() local
    2011  if (!ShiftVal)  in LowerShift()
    2015  ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);  in LowerShift()
    2032  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);  in LowerShift()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/SystemZ/ |
| H A D | SystemZISelLowering.cpp |
    2323  static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {  in isSimpleShift() argument
    2332  ShiftVal = Amount;  in isSimpleShift()
    2481  unsigned NewCCMask, ShiftVal;  in adjustForTestUnderMask() local
    2484  isSimpleShift(NewC.Op0, ShiftVal) &&  in adjustForTestUnderMask()
    2485  (MaskVal >> ShiftVal != 0) &&  in adjustForTestUnderMask()
    2486  ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&  in adjustForTestUnderMask()
    2488  MaskVal >> ShiftVal,  in adjustForTestUnderMask()
    2489  CmpVal >> ShiftVal,  in adjustForTestUnderMask()
    2492  MaskVal >>= ShiftVal;  in adjustForTestUnderMask()
    2495  isSimpleShift(NewC.Op0, ShiftVal) &&  in adjustForTestUnderMask()
    [all …]
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/InstCombine/ |
| H A D | InstCombineCasts.cpp |
    488  ConstantInt *ShiftVal = nullptr;  in foldVecTruncToExtElt() local
    491  m_ConstantInt(ShiftVal)))) ||  in foldVecTruncToExtElt()
    498  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;  in foldVecTruncToExtElt()
|
| H A D | InstCombineCompares.cpp |
    2050  const APInt *ShiftVal;  in foldICmpShlConstant() local
    2051  if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))  in foldICmpShlConstant()
    2052  return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);  in foldICmpShlConstant()
    2198  const APInt *ShiftVal;  in foldICmpShrConstant() local
    2199  if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))  in foldICmpShrConstant()
    2200  return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);  in foldICmpShrConstant()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/X86/ |
| H A D | X86InstCombineIntrinsic.cpp |
    389  APInt ShiftVal = COp->getValue();  in simplifyX86varShift() local
    390  if (ShiftVal.uge(BitWidth)) {  in simplifyX86varShift()
    396  ShiftAmts.push_back((int)ShiftVal.getZExtValue());  in simplifyX86varShift()
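simplifyX86varShift() special-cases per-lane shift amounts that are at least the element width, because x86 variable logical shifts produce zero in that case rather than the undefined behaviour a plain C++ shift would have. A scalar sketch of that rule (my own helper, not the InstCombine code):

    #include <cstdint>

    // Scalar model of the out-of-range rule the fold above checks with
    // ShiftVal.uge(BitWidth): x86 variable logical shifts write 0 when the
    // per-lane count is >= the element bit width.
    uint32_t varLogicalShr(uint32_t Val, uint64_t ShiftVal) {
      const unsigned BitWidth = 32;
      if (ShiftVal >= BitWidth)
        return 0;                     // hardware saturates the result to zero
      return Val >> static_cast<unsigned>(ShiftVal);
    }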
|
| H A D | X86TargetTransformInfo.cpp |
    4121  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {  in getIntImmCost() local
    4122  APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);  in getIntImmCost()
|
| H A D | X86ISelLowering.cpp |
    7828  uint64_t ShiftVal = N.getConstantOperandVal(1);  in getFauxShuffleMask() local
    7830  if (NumBitsPerElt <= ShiftVal) {  in getFauxShuffleMask()
    7836  if ((ShiftVal % 8) != 0)  in getFauxShuffleMask()
    7839  uint64_t ByteShift = ShiftVal / 8;  in getFauxShuffleMask()
    8354  SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);  in getVShift() local
    8355  return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));  in getVShift()
    18815  int ShiftVal = (IdxVal % 4) * 8;  in LowerEXTRACT_VECTOR_ELT() local
    18816  if (ShiftVal != 0)  in LowerEXTRACT_VECTOR_ELT()
    18818  DAG.getConstant(ShiftVal, dl, MVT::i8));  in LowerEXTRACT_VECTOR_ELT()
    18826  int ShiftVal = (IdxVal % 2) * 8;  in LowerEXTRACT_VECTOR_ELT() local
    [all …]
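The getFauxShuffleMask() hits treat a per-element shift by a whole number of bytes as a byte shuffle, hence the ShiftVal % 8 check and ByteShift = ShiftVal / 8. A sketch of how a left shift maps to such a mask, with -1 standing for a zeroed byte; the helper name and little-endian byte order are illustrative assumptions, not the X86 lowering code:

    #include <cstdint>
    #include <vector>

    // A per-element left shift by a multiple of 8 bits is a byte shuffle:
    // destination byte B of each element reads source byte B - ByteShift,
    // and bytes shifted in from below become zero (marked with -1).
    std::vector<int> shlToByteShuffleMask(unsigned NumElts, unsigned NumBytesPerElt,
                                          uint64_t ShiftVal /* in bits */) {
      std::vector<int> Mask;
      if (ShiftVal % 8 != 0 || ShiftVal >= NumBytesPerElt * 8)
        return Mask;                          // not expressible as a byte shuffle
      unsigned ByteShift = ShiftVal / 8;
      for (unsigned E = 0; E != NumElts; ++E)
        for (unsigned B = 0; B != NumBytesPerElt; ++B)
          Mask.push_back(B < ByteShift
                             ? -1
                             : static_cast<int>(E * NumBytesPerElt + B - ByteShift));
      return Mask;
    }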
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/ |
| H A D | AMDGPUISelDAGToDAG.cpp |
    2170  uint32_t ShiftVal = Shift->getZExtValue();  in SelectS_BFE() local
    2177  Srl.getOperand(0), ShiftVal, WidthVal));  in SelectS_BFE()
    2192  uint32_t ShiftVal = Shift->getZExtValue();  in SelectS_BFE() local
    2193  uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;  in SelectS_BFE()
    2199  And.getOperand(0), ShiftVal, WidthVal));  in SelectS_BFE()
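SelectS_BFE() folds shift-and-mask patterns into a bitfield extract whose offset is ShiftVal and whose width comes from the mask. A stand-alone sketch of the unsigned extract operation being formed (names and bounds are my own assumptions, not the AMDGPU selector or hardware spec):

    #include <cassert>
    #include <cstdint>

    // Unsigned 32-bit bitfield extract: take Width bits of Src starting at
    // bit Offset, which is what the (and (srl x, ShiftVal), Mask) pattern
    // above computes.
    uint32_t bfeU32(uint32_t Src, unsigned Offset, unsigned Width) {
      assert(Offset < 32 && "offset must be in range");
      if (Width == 0)
        return 0;
      if (Width >= 32 - Offset)
        return Src >> Offset;                       // everything above Offset
      return (Src >> Offset) & ((1u << Width) - 1); // mask down to Width bits
    }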
|
| H A D | AMDGPUISelLowering.cpp |
    4058  SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);  in PerformDAGCombine() local
    4060  BitsFrom, ShiftVal);  in PerformDAGCombine()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/IR/ |
| H A D | AutoUpgrade.cpp |
    1159  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();  in UpgradeX86ALIGNIntrinsics() local
    1168  ShiftVal &= (NumElts - 1);  in UpgradeX86ALIGNIntrinsics()
    1172  if (ShiftVal >= 32)  in UpgradeX86ALIGNIntrinsics()
    1177  if (ShiftVal > 16) {  in UpgradeX86ALIGNIntrinsics()
    1178  ShiftVal -= 16;  in UpgradeX86ALIGNIntrinsics()
    1187  unsigned Idx = ShiftVal + i;  in UpgradeX86ALIGNIntrinsics()
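UpgradeX86ALIGNIntrinsics() rewrites the old byte-alignment intrinsics as shufflevectors: conceptually the two 16-byte inputs are concatenated, the 32-byte composite is shifted right by ShiftVal bytes, and the low 16 bytes are kept, which is why ShiftVal >= 32 zeroes the result and ShiftVal > 16 only ever reads one input. The CGBuiltin.cpp entry below lowers the same pattern from Clang. A hedged scalar model of that operation (my own helper, not the upgrade code):

    #include <array>
    #include <cstdint>

    // Scalar model of the byte alignment the shufflevector emulates: result
    // byte i comes from byte i + ShiftVal of the 32-byte concatenation Hi:Lo,
    // and bytes past the end of the concatenation are zero.
    std::array<uint8_t, 16> alignBytes(const std::array<uint8_t, 16> &Hi,
                                       const std::array<uint8_t, 16> &Lo,
                                       unsigned ShiftVal) {
      std::array<uint8_t, 16> Res{};
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + ShiftVal;
        if (Idx < 16)
          Res[i] = Lo[Idx];
        else if (Idx < 32)
          Res[i] = Hi[Idx - 16];
        // else: shifted past the concatenation, stays zero (ShiftVal >= 32 case)
      }
      return Res;
    }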
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/GISel/ |
| H A D | AArch64InstructionSelector.cpp |
    5740  unsigned ShiftVal = AArch64_AM::getShifterImm(ShType, Val);  in selectShiftedRegister() local
    5743  [=](MachineInstrBuilder &MIB) { MIB.addImm(ShiftVal); }}};  in selectShiftedRegister()
    5831  uint64_t ShiftVal = 0;  in selectArithExtendedRegister() local
    5848  ShiftVal = *MaybeShiftVal;  in selectArithExtendedRegister()
    5849  if (ShiftVal > 4)  in selectArithExtendedRegister()
    5885  MIB.addImm(getArithExtendImm(Ext, ShiftVal));  in selectArithExtendedRegister()
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/Mips/ |
| H A D | MipsFastISel.cpp |
    1990  uint64_t ShiftVal = C->getZExtValue();  in selectShift() local
    2006  emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);  in selectShift()
|
| /netbsd-src/external/apache2/llvm/dist/clang/lib/CodeGen/ |
| H A D | CGBuiltin.cpp |
    13325  unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;  in EmitX86BuiltinExpr() local
    13333  if (ShiftVal >= 32)  in EmitX86BuiltinExpr()
    13338  if (ShiftVal > 16) {  in EmitX86BuiltinExpr()
    13339  ShiftVal -= 16;  in EmitX86BuiltinExpr()
    13348  unsigned Idx = ShiftVal + i;  in EmitX86BuiltinExpr()
    13367  unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;  in EmitX86BuiltinExpr() local
    13370  ShiftVal &= (2 * NumElts) - 1;  in EmitX86BuiltinExpr()
    13374  Indices[i] = i + ShiftVal;  in EmitX86BuiltinExpr()
    13453  unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;  in EmitX86BuiltinExpr() local
    13459  if (ShiftVal >= 16)  in EmitX86BuiltinExpr()
    [all …]
|
| /netbsd-src/external/apache2/llvm/dist/llvm/lib/Analysis/ |
| H A D | ValueTracking.cpp |
    2408  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);  in isKnownNonZero() local
    2410  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)  in isKnownNonZero()
    2413  if (Known.countMinTrailingZeros() >= ShiftVal)  in isKnownNonZero()
|