/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/
MachineIRBuilder.h
    1158  /// Build and insert `Res = G_INSERT_SUBVECTOR Src0, Src1, Idx`.
    1161  /// \pre \p Res, \p Src0, and \p Src1 must be generic virtual registers with ...
    1165  MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, ...
    1736  MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, ...
    1739  return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
    1753  MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, ...
    1756  return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
    1769  MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, ...
    1772  return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
    1784  MachineInstrBuilder buildAbds(const DstOp &Dst, const SrcOp &Src0, ...
    Src0 is also an argument of buildAnd (1762), buildNot (1792), buildNeg (1800),
    buildCTPOP (1806), buildCTLZ (1811), buildCTLZ_ZERO_UNDEF (1816), buildCTTZ (1821),
    buildCTTZ_ZERO_UNDEF (1826), buildBSwap (1831), buildFCopysign (1950),
    buildUITOFP (1956), buildSITOFP (1961), buildFPTOUI (1966), buildFPTOSI (1971),
    buildSMin (1984), buildSMax (1990), buildUMin (1996), and buildUMax (2002).
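The helpers listed above all delegate to buildInstr with the matching generic opcode (G_ADD, G_SUB, G_MUL, and so on). A minimal sketch of driving them, assuming an already positioned MachineIRBuilder; the helper name and registers are hypothetical, and only buildAdd/buildSub/buildMul are taken from the header above:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    using namespace llvm;

    // Hypothetical helper: emit (A + B) * (A - B) at the builder's current
    // insertion point and return the register holding the product.
    static Register buildSumTimesDiff(MachineIRBuilder &B, LLT Ty,
                                      Register A, Register C) {
      auto Sum  = B.buildAdd(Ty, A, C);           // G_ADD Ty, A, C
      auto Diff = B.buildSub(Ty, A, C);           // G_SUB Ty, A, C
      return B.buildMul(Ty, Sum, Diff).getReg(0); // G_MUL of the two results
    }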
GISelKnownBits.h
    38  void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known, ...
    42  unsigned computeNumSignBitsMin(Register Src0, Register Src1, ...
MIPatternMatch.h
    784  Src0Ty Src0;
    788  TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
    789      : Src0(Src0), Src1(Src1), Src2(Src2) {}
    795  return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
    806  m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    808  TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
    813  m_GISelect(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    815  Src0, Src1, Src2);
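TernaryOp_match is the shared matcher behind the three-operand pattern helpers, and m_GInsertVecElt / m_GISelect simply instantiate it for G_INSERT_VECTOR_ELT and G_SELECT. A small sketch of how m_GISelect is typically used with mi_match; the wrapper function is hypothetical:

    #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    using namespace llvm;
    using namespace MIPatternMatch;

    // Hypothetical helper: if Reg is defined by G_SELECT Cond, TVal, FVal,
    // capture the three source registers and report success.
    static bool matchSelectParts(Register Reg, const MachineRegisterInfo &MRI,
                                 Register &Cond, Register &TVal, Register &FVal) {
      return mi_match(Reg, MRI,
                      m_GISelect(m_Reg(Cond), m_Reg(TVal), m_Reg(FVal)));
    }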
/llvm-project/llvm/unittests/CodeGen/GlobalISel/
PatternMatchTest.cpp
    48   std::optional<ValueAndVReg> Src0;  (local in TEST_F)
    49   bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
    51   EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
    122  Register Src0, Src1, Src2;  (local in TEST_F)
    124  m_GAdd(m_Reg(Src0), m_Reg(Src1)));
    126  EXPECT_EQ(Src0, Copies[0]);
    134  m_GMul(m_Reg(Src0), m_Reg(Src1)));
    136  EXPECT_EQ(Src0, MIBAdd.getReg(0));
    141  m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
    143  EXPECT_EQ(Src0, Copie...
    Src0 is also declared as a TEST_F local at 464, 527, and 544.
/llvm-project/llvm/lib/Target/AMDGPU/
R600ExpandSpecialInstrs.cpp  (all matches in runOnMachineFunction)
    146  Register Src0 = ...
    152  (void) Src0;
    154  if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
    156  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
    198  Register Src0 = ...
    211  Src0 = TRI.getSubReg(Src0, SubRegIndex);
    217  Src1 = TRI.getSubReg(Src0, SubRegIndex1);
    218  Src0 = TRI.getSubReg(Src0, SubRegIndex0);
    252  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
SIShrinkInstructions.cpp
    101  // Try to fold Src0  (in foldImmediates)
    102  MachineOperand &Src0 = MI.getOperand(Src0Idx);
    103  if (Src0.isReg()) {
    104  Register Reg = Src0.getReg();
    113  Src0.ChangeToImmediate(MovSrc.getImm());
    116  Src0.ChangeToFrameIndex(MovSrc.getIndex());
    119  Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(), ...
    253  const MachineOperand &Src0 = MI.getOperand(0);  (in shrinkScalarCompare)
    254  if (!Src0.isReg())
    428  MachineOperand &Src0 ...  (in shrinkMadFma)
    Src0 is also a local in shrinkScalarLogicOp (512) and runOnMachineFunction (847),
    both as MachineOperand *Src0 = &MI.getOperand(1);.
AMDGPUInstCombineIntrinsic.cpp
    45   static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1, ...
    47   APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
    49   APFloat::cmpResult Cmp0 = Max3.compare(Src0);
    57   return maxnum(Src0, Src2);
    59   return maxnum(Src0, Src1);
    642  Value *Src0 = II.getArgOperand(0);  (in instCombineIntrinsic)
    647  II.getModule(), Intrinsic::is_fpclass, Src0->getType()));
    656  if (isa<PoisonValue>(Src0) || isa<PoisonValue>(Src1))
    664  if (IC.getSimplifyQuery().isUndefValue(Src0)) {
    697  if (Value *Src0 ...  (in instCombineIntrinsic)
    Src0 is also a local (Value *Src0 = II.getArgOperand(0);) at 601, 631, 660, 763,
    and 852 in instCombineIntrinsic.
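fmed3AMDGCN constant-folds the AMDGPU fmed3 intrinsic: it computes the largest of the three inputs, then returns the maximum of the remaining two, which is the median. A plain-C++ analogue of that selection logic, as a hypothetical helper that ignores the APFloat NaN handling of the real fold:

    #include <cmath>

    // Median of three: exclude whichever input equals the overall maximum
    // and take the larger of the other two.
    static float med3(float Src0, float Src1, float Src2) {
      float Max3 = std::fmax(std::fmax(Src0, Src1), Src2);
      if (Max3 == Src0)
        return std::fmax(Src1, Src2);
      if (Max3 == Src1)
        return std::fmax(Src0, Src2);
      return std::fmax(Src0, Src1);
    }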
GCNVOPDUtils.cpp  (all matches in checkVOPDRegConstraints)
    83  const MachineOperand &Src0 = MI.getOperand(VOPD::Component::SRC0);
    84  if (Src0.isReg()) {
    85  if (!TRI->isVectorRegister(MRI, Src0.getReg())) {
    86  if (!is_contained(UniqueScalarRegs, Src0.getReg()))
    87  UniqueScalarRegs.push_back(Src0.getReg());
    91  addLiteral(Src0);
SIOptimizeExecMasking.cpp
    555  MachineOperand &Src0 = SaveExecInst->getOperand(1);  (in optimizeExecSequence)
    560  if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
    566  OtherOp = &Src0;
    602  MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);  (in optimizeVCMPSaveExecSequence)
    628  Builder.add(*Src0);
    636  if (Src0->isReg())
    637  MRI->clearKillFlags(Src0->getReg());
    700  MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);  (in tryRecordVCmpxAndSaveexecSequence)
    701  if (Src0 ...
AMDGPUCombinerHelper.cpp
    426  Register Src0, ...  (in matchExpandPromotedF16FMed3)
    434  return isFPExtFromF16OrConst(MRI, Src0) && isFPExtFromF16OrConst(MRI, Src1) &&
    439  Register Src0, ...  (in applyExpandPromotedF16FMed3)
    444  Src0 = Builder.buildFPTrunc(LLT::scalar(16), Src0).getReg(0);
    448  LLT Ty = MRI.getType(Src0);
    449  auto A1 = Builder.buildFMinNumIEEE(Ty, Src0, Src1);
    450  auto B1 = Builder.buildFMaxNumIEEE(Ty, Src0, Src1);
    Src0 is an argument of matchExpandPromotedF16FMed3 (419) and
    applyExpandPromotedF16FMed3 (432).
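applyExpandPromotedF16FMed3 truncates the promoted operands back to f16 and rebuilds the median from fmin/fmax nodes; the listing shows only the first two (A1 and B1). Assuming the rest follows the standard med3 min/max identity, a plain-C++ sketch of the full network looks like this (hypothetical helper, IEEE NaN semantics not modelled):

    #include <cmath>

    // med3(a, b, c) == max(min(a, b), min(max(a, b), c))
    static float med3MinMax(float A, float B, float C) {
      float Lo = std::fmin(A, B);              // corresponds to A1 above
      float Hi = std::fmax(A, B);              // corresponds to B1 above
      return std::fmax(Lo, std::fmin(Hi, C));  // remaining nodes of the network
    }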
SIFoldOperands.cpp
    262  MachineOperand *Src0 = &Def->getOperand(1);  (in tryFoldImmWithOpSel)
    268  if (!Src0->isFI() && !Src1->isFI())
    271  if (Src0->isFI())
    272  std::swap(Src0, Src1);
    274  const bool UseVOP3 = !Src0->isImm() || TII->isInlineConstant(*Src0);
    292  Add.add(*Src0).add(*Src1).setMIFlags(Def->getFlags());
    308  .add(*Src0)
    737  // Special case for s_fmac_f32 if we are trying to fold into Src0 or Src1.  (in tryToFoldACImm)
    739  // If folding for Src0 happen...
    Src0 is also a local in tryConstantFoldOp (1218), tryFoldCndMask (1316),
    tryFoldZeroHighBits (1354), isClamp (1529), and isOMod (1666, 1703).
AMDGPUPostLegalizerCombiner.cpp
    324  Register Src0;  (local in matchCvtF32UByteN)
    326  IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));
    327  if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) {
    336  MatchInfo.CvtVal = Src0;
    419  Register Src0 = MI.getOperand(1).getReg();  (local in matchCombine_s_mul_u64)
    421  if (MRI.getType(Src0) != LLT::scalar(64))
    425  KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
    431  KB->computeNumSignBits(Src0) >= 33) {
GCNDPPCombine.cpp
    294  auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);  (in createDPPInst)
    295  assert(Src0);
    297  if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
    302  DPPInst.add(*Src0);
    327  "Src0 and Src1 operands should have the same size");
    688  auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);  (in combineDPPMov)
    690  if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
    696  assert(Src0 && "Src1 without Src0?");
    697  if ((Use == Src0 ...
SIPeepholeSDWA.cpp
    587  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  (in matchSDWAOperand)
    588  auto Imm = foldToImm(*Src0);
    627  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    628  auto Imm = foldToImm(*Src0);
    694  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    697  if (!Src0->isReg() || Src0->getReg().isPhysical() ||
    702  Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
    711  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    714  auto Imm = foldToImm(*Src0);
    Src0 is also a local in isConvertibleToSDWA (1008) and convertToSDWA (1061).
AMDGPUCombinerHelper.h
    38  bool matchExpandPromotedF16FMed3(MachineInstr &MI, Register Src0, ...
    40  void applyExpandPromotedF16FMed3(MachineInstr &MI, Register Src0, ...
SIInstrInfo.cpp
    2705  MachineOperand &Src0, ...  (in expandMovDPP64)
    2783  // pre-gfx10 However, most test cases need literals in Src0 for VOP ...  (in commuteInstructionImpl)
    2784  // FIXME: After gfx9, literal can be in place other than Src0
    2830  MachineOperand &Src0 = MI.getOperand(Src0Idx);
    2832  if (!isLegalToSwap(MI, Src0Idx, &Src0, Src1Idx, &Src1)) {
    2836  if (Src0.isReg() && Src1.isReg()) {
    2840  } else if (Src0.isReg() && !Src1.isReg()) {
    2841  CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
    2842  } else if (!Src0.isReg() && Src1.isReg()) {
    2843  CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
    Src0 also appears in swapSourceModifiers (2720), commuteInstructionImpl (2788),
    foldImmediate (3512), convertToThreeAddress (3917, 3930), verifyInstruction
    (5004, 5026, 5095), legalizeOperandsVOP2 (5849), legalizeOperands (6666, 6752),
    moveToVALUImpl (7219), lowerSelect (7544), lowerScalarXnor (7653),
    splitScalarNotBinop (7719), splitScalarBinOpN2 (7748), splitScalar64BitUnaryOp
    (7775), splitScalarSMulU64 (7840), splitScalarSMulPseudo (7949),
    splitScalar64BitBinaryOp (8008), splitScalar64BitXnor (8075), and
    movePackToVALU (8305).
AMDGPURegBankCombiner.cpp  (all matches in matchFPMed3ToClamp)
    314  MachineInstr *Src0 = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);
    318  if (isFCst(Src0) && !isFCst(Src1))
    319  std::swap(Src0, Src1);
    322  if (isFCst(Src0) && !isFCst(Src1))
    323  std::swap(Src0, Src1);
    327  Register Val = Src0->getOperand(0).getReg();
SIModeRegister.cpp  (in getInstructionMode)
    185  MachineOperand Src0 = MI.getOperand(1);
    188  B.add(Src0); // re-add src0 operand
    Src0 is also a local at 199 (MachineOperand Src0 = MI.getOperand(1);).
SIISelLowering.cpp
    5081  MachineOperand &Src0 = MI.getOperand(2);  (in EmitInstrWithCustomInserter)
    5089  .add(Src0)
    5108  MachineOperand &Src0 = MI.getOperand(1);
    5115  .add(Src0)
    5126  MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
    5128  MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
    5162  MachineOperand &Src0 = MI.getOperand(1);
    5168  .add(Src0)
    5184  const TargetRegisterClass *Src0RC = Src0.isReg()
    5185  ? MRI.getRegClass(Src0 ...
    Src0 also appears in lowerFCMPIntrinsic (6055), a lambda inside lowerLaneOp
    (6124) and lowerLaneOp itself (6160), ReplaceNodeResults (6312, 6324),
    LowerINTRINSIC_WO_CHAIN (8616), LowerINTRINSIC_VOID (9495), LowerFDIV16 (10571),
    performFMed3Combine (13271), performCvtPkRTZCombine (13308), placeSources
    (13812), checkDot4MulSignedness (13974), performAddCombine (14069, 14092,
    14135), and PostISelFolding (15060).
SIFixSGPRCopies.cpp  (in runOnMachineFunction)
    719  MachineOperand &Src0 = MI.getOperand(Src0Idx);
    723  if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
    724  Src0.getReg() != AMDGPU::M0) &&
    733  for (MachineOperand *MO : {&Src0, &Src1}) {
SILoadStoreOptimizer.cpp
    1300  const auto *Src0 = TII->getNamedOperand(*CI.I, OpName);  (in mergeRead2Pair)
    1304  .add(*Src0)
    2113  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);  (in promoteConstantOffsetToImm)
    2116  auto Offset0P = extractConstOffset(*Src0);
    2122  BaseLo = *Src0;
    2128  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
    2131  if (Src0->isImm())
    2132  std::swap(Src0, Src1);
    2134  if (!Src1->isImm() || Src0->isImm())
    2138  BaseHi = *Src0;
    Src0 is also a local in copyFromSrcRegs (1241) and processBaseWithConstOffset (2019).
/llvm-project/llvm/lib/Target/AArch64/
AArch64AdvSIMDScalarPass.cpp  (all matches in transformInstruction)
    298  unsigned Src0 = 0, SubReg0;
    309  Src0 = MOSrc0->getReg();
    341  if (!Src0) {
    343  Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    344  insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
    363  .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
/llvm-project/llvm/lib/Target/PowerPC/
PPCExpandAtomicPseudoInsts.cpp  (all matches in PairedCopy)
    52  Register Dest0, Register Dest1, Register Src0, ...
    56  if (Dest0 == Src1 && Dest1 == Src0) {
    61  } else if (Dest0 != Src0 || Dest1 != Src1) {
    62  if (Dest0 == Src1 || Dest1 != Src0) {
    64  BuildMI(MBB, MBBI, DL, OR, Dest0).addReg(Src0).addReg(Src0);
    66  BuildMI(MBB, MBBI, DL, OR, Dest0).addReg(Src0).addReg(Src0);
    Full signature (53): PairedCopy(const PPCInstrInfo *TII, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Dest0,
    Register Dest1, Register Src0, Register Src1).
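PairedCopy copies a register pair (Src0, Src1) into (Dest0, Dest1), ordering the two moves so that neither source is overwritten before it is read and handling the fully swapped pair as its own case. A minimal sketch of that ordering logic over a toy register file; all names here are hypothetical, and the real lowering emits OR-based register copies rather than assignments:

    #include <array>

    using RegFile = std::array<long, 8>;   // toy register file indexed by number

    static void emitMove(RegFile &RF, int Dst, int Src) { RF[Dst] = RF[Src]; }

    // Copy (S0, S1) into (D0, D1) without clobbering a still-needed source.
    static void pairedCopy(RegFile &RF, int D0, int D1, int S0, int S1) {
      if (D0 == S1 && D1 == S0) {
        long Tmp = RF[S0];                 // full swap needs a temporary
        emitMove(RF, D0, S1);
        RF[D1] = Tmp;
      } else if (D0 != S0 || D1 != S1) {   // skip the already-in-place case
        if (D1 == S0) {                    // writing D1 first would clobber S0
          emitMove(RF, D0, S0);
          emitMove(RF, D1, S1);
        } else {                           // safe order; also covers D0 == S1
          emitMove(RF, D1, S1);
          emitMove(RF, D0, S0);
        }
      }
    }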
/llvm-project/llvm/lib/Transforms/InstCombine/
InstCombineCalls.cpp
    959   Value *Src0 = II.getArgOperand(0);  (in foldIntrinsicIsFPClass)
    972   if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
    980   if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
    991   Constant *Inf = ConstantFP::getInfinity(Src0->getType());
    997   Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
    1010  ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
    1011  Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
    1012  : Builder.CreateFCmpOEQ(Src0, Inf);
    1024  Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
    1026  Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
    Src0 is also a local (Value *Src0 = II->getArgOperand(0);) in visitCallInst (2434).
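The foldIntrinsicIsFPClass lines above rewrite an is.fpclass mask that tests only one infinity into a single floating-point compare against the correspondingly signed infinity constant, ordered or unordered depending on whether NaN is also accepted by the mask. The underlying equivalence for the ordered case, sketched as a hypothetical plain-C++ helper:

    #include <limits>

    // llvm.is.fpclass(x, fcPosInf) is the same as an ordered x == +inf;
    // the fcNegInf case compares against -inf instead.
    static bool isSignedInf(double X, bool Negative) {
      const double Inf = std::numeric_limits<double>::infinity();
      return X == (Negative ? -Inf : Inf);
    }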
/llvm-project/llvm/lib/CodeGen/GlobalISel/
GISelKnownBits.cpp
    110  /// Compute known bits for the intersection of \p Src0 and \p Src1
    111  void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,
    123  computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);
    635  /// Compute number of sign bits for the intersection of \p Src0 and \p Src1
    636  unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,
    643  return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
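computeKnownBitsMin keeps only the bit facts that hold for both sources, i.e. the intersection of their known-bits sets, and computeNumSignBitsMin does the analogous thing by taking the smaller sign-bit count. A standalone sketch of that intersection over plain (Zero, One) masks; the struct is hypothetical, not the llvm::KnownBits API:

    #include <cstdint>

    // Known bits tracked as two masks: bits proven zero and bits proven one.
    struct Known {
      uint64_t Zero = 0;
      uint64_t One = 0;
    };

    // Facts valid whichever of the two sources is taken: keep only the bits
    // known in both, the conservative merge computeKnownBitsMin performs.
    static Known intersect(const Known &A, const Known &B) {
      return {A.Zero & B.Zero, A.One & B.One};
    }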