/netbsd-src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/GlobalISel/
MachineIRBuilder.h
    1396  MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
    1399  return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
    1413  MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
    1416  return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
    1429  MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
    1432  return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
    1435  MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
    1438  return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
    1441  MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
    1444  return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
    [all …]
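
These build helpers are thin wrappers that forward to buildInstr with the matching generic opcode (G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH). A minimal sketch of how a GlobalISel pass might use them; the function and register names are illustrative assumptions, not code from the indexed files:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Emits Dst = (A + B) * A as generic MIR. Each helper call expands to
// buildInstr(TargetOpcode::G_..., {Dst}, {Src0, Src1}, Flags).
static Register emitMulAdd(MachineIRBuilder &MIRBuilder, Register A,
                           Register B) {
  LLT S32 = LLT::scalar(32);
  auto Sum = MIRBuilder.buildAdd(S32, A, B);    // G_ADD
  auto Prod = MIRBuilder.buildMul(S32, Sum, A); // G_MUL
  return Prod.getReg(0);
}

buildUMulH and buildSMulH follow the same call shape but produce the high half of the unsigned/signed widened product.
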
MIPatternMatch.h
    520  Src0Ty Src0;
    524  TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
    525  : Src0(Src0), Src1(Src1), Src2(Src2) {}
    531  return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
    542  m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    544  TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
    549  m_GISelect(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    551  Src0, Src1, Src2);
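
TernaryOp_match checks the opcode and then matches operands 1 through 3 in source order; m_GInsertVecElt and m_GISelect are its public entry points for G_INSERT_VECTOR_ELT and G_SELECT. A hedged sketch of a use (the helper and variable names are illustrative):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Detects "select %cond, %x, %x", which can be simplified to %x.
static bool isSelectOfSameValue(Register Reg,
                                const MachineRegisterInfo &MRI) {
  Register Cond, TVal, FVal;
  if (!mi_match(Reg, MRI,
                m_GISelect(m_Reg(Cond), m_Reg(TVal), m_Reg(FVal))))
    return false;
  return TVal == FVal;
}
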
GISelKnownBits.h
    38  void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
    42  unsigned computeNumSignBitsMin(Register Src0, Register Src1,
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
AMDGPUInstCombineIntrinsic.cpp
    43   static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1, in fmed3AMDGCN() argument
    45   APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2); in fmed3AMDGCN()
    47   APFloat::cmpResult Cmp0 = Max3.compare(Src0); in fmed3AMDGCN()
    55   return maxnum(Src0, Src2); in fmed3AMDGCN()
    57   return maxnum(Src0, Src1); in fmed3AMDGCN()
    278  Value *Src0 = II.getArgOperand(0); in instCombineIntrinsic() local
    282  if (isa<UndefValue>(Src0)) { in instCombineIntrinsic()
    306  Value *FCmp = IC.Builder.CreateFCmpUNO(Src0, Src0); in instCombineIntrinsic()
    314  IC.Builder.CreateFCmpOEQ(Src0, ConstantFP::get(Src0->getType(), 0.0)); in instCombineIntrinsic()
    322  isKnownNeverNaN(Src0, &IC.getTargetLibraryInfo())) { in instCombineIntrinsic()
    [all …]
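
fmed3AMDGCN constant-folds the amdgcn.fmed3 intrinsic by taking the maximum of the three inputs and then returning the maximum of the remaining two, which is the median. The same idea on plain floats, as a simplified sketch (the real code operates on APFloat and handles NaN ordering explicitly):

#include <algorithm>

static float fmed3(float A, float B, float C) {
  float Max3 = std::max({A, B, C});
  // Remove the maximum; the larger of the two leftovers is the median.
  if (Max3 == A)
    return std::max(B, C);
  if (Max3 == B)
    return std::max(A, C);
  return std::max(A, B);
}
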
R600ExpandSpecialInstrs.cpp
    146  Register Src0 = in runOnMachineFunction() local
    152  (void) Src0; in runOnMachineFunction()
    154  if ((TRI.getEncodingValue(Src0) & 0xff) < 127 && in runOnMachineFunction()
    156  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1)); in runOnMachineFunction()
    198  Register Src0 = in runOnMachineFunction() local
    211  Src0 = TRI.getSubReg(Src0, SubRegIndex); in runOnMachineFunction()
    217  Src1 = TRI.getSubReg(Src0, SubRegIndex1); in runOnMachineFunction()
    218  Src0 = TRI.getSubReg(Src0, SubRegIndex0); in runOnMachineFunction()
    252  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1); in runOnMachineFunction()
SIShrinkInstructions.cpp
    69   MachineOperand &Src0 = MI.getOperand(Src0Idx); in foldImmediates() local
    70   if (Src0.isReg()) { in foldImmediates()
    71   Register Reg = Src0.getReg(); in foldImmediates()
    81   Src0.ChangeToImmediate(MovSrc.getImm()); in foldImmediates()
    84   Src0.ChangeToFrameIndex(MovSrc.getIndex()); in foldImmediates()
    87   Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(), in foldImmediates()
    178  const MachineOperand &Src0 = MI.getOperand(0); in shrinkScalarCompare() local
    179  if (!Src0.isReg()) in shrinkScalarCompare()
    315  MachineOperand *Src0 = &MI.getOperand(1); in shrinkScalarLogicOp() local
    317  MachineOperand *SrcReg = Src0; in shrinkScalarLogicOp()
    [all …]
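
foldImmediates rewrites a register use in place when the register's unique def is a materializing move, switching the operand to the moved immediate (ChangeToImmediate), frame index, or global address. A conceptual sketch of the immediate case; the helper name is hypothetical and the real pass performs extra legality and liveness checks omitted here:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// If Src0's only def is "Reg = MOV #imm", fold the constant into the use.
static void foldImmIntoUse(MachineOperand &Src0, MachineRegisterInfo &MRI) {
  if (!Src0.isReg())
    return;
  MachineInstr *Def = MRI.getUniqueVRegDef(Src0.getReg());
  if (!Def || !Def->isMoveImmediate())
    return;
  const MachineOperand &MovSrc = Def->getOperand(1);
  if (MovSrc.isImm())
    Src0.ChangeToImmediate(MovSrc.getImm());
}
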
SIFoldOperands.cpp
    1056  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx)); in tryConstantFoldOp() local
    1060  Src0->isImm()) { in tryConstantFoldOp()
    1061  MI->getOperand(1).ChangeToImmediate(~Src0->getImm()); in tryConstantFoldOp()
    1071  if (!Src0->isImm() && !Src1->isImm()) in tryConstantFoldOp()
    1077  if (Src0->isImm() && Src1->isImm()) { in tryConstantFoldOp()
    1079  if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm())) in tryConstantFoldOp()
    1096  if (Src0->isImm() && !Src1->isImm()) { in tryConstantFoldOp()
    1097  std::swap(Src0, Src1); in tryConstantFoldOp()
    1158  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in tryFoldCndMask() local
    1160  if (!Src1->isIdenticalTo(*Src0)) { in tryFoldCndMask()
    [all …]
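
tryConstantFoldOp evaluates the operation outright when both sources are immediates, and otherwise canonicalizes the constant into the Src1 slot with std::swap so later folding code only has one shape to handle. A sketch of that control flow; BinOp and evalBinaryOp are hypothetical stand-ins for the real opcode set and evalBinaryInstruction:

#include "llvm/CodeGen/MachineOperand.h"
#include <cstdint>
#include <utility>

using namespace llvm;

enum class BinOp { And, Or, Xor }; // illustrative subset

static bool evalBinaryOp(BinOp Op, int64_t &Result, int64_t L, int64_t R) {
  switch (Op) {
  case BinOp::And: Result = L & R; return true;
  case BinOp::Or:  Result = L | R; return true;
  case BinOp::Xor: Result = L ^ R; return true;
  }
  return false;
}

// Returns true (and sets NewImm) only when both operands were constant.
static bool foldBinaryImms(MachineOperand *&Src0, MachineOperand *&Src1,
                           BinOp Op, int64_t &NewImm) {
  if (!Src0->isImm() && !Src1->isImm())
    return false;
  if (Src0->isImm() && Src1->isImm())
    return evalBinaryOp(Op, NewImm, Src0->getImm(), Src1->getImm());
  if (Src0->isImm() && !Src1->isImm())
    std::swap(Src0, Src1); // keep the immediate in the Src1 slot
  return false;
}
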
GCNDPPCombine.cpp
    238  auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0); in createDPPInst() local
    239  assert(Src0); in createDPPInst()
    240  if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) { in createDPPInst()
    245  DPPInst.add(*Src0); in createDPPInst()
    540  auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0); in combineDPPMov() local
    542  if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1] in combineDPPMov()
    547  assert(Src0 && "Src1 without Src0?"); in combineDPPMov()
    548  if (Src1 && Src1->isIdenticalTo(*Src0)) { in combineDPPMov()
    558  if (Use == Src0) { in combineDPPMov()
SIInstrInfo.cpp
    2020  MachineOperand &Src0, in swapSourceModifiers() argument
    2085  MachineOperand &Src0 = MI.getOperand(Src0Idx); in commuteInstructionImpl() local
    2089  if (Src0.isReg() && Src1.isReg()) { in commuteInstructionImpl()
    2090  if (isOperandLegal(MI, Src1Idx, &Src0)) { in commuteInstructionImpl()
    2096  } else if (Src0.isReg() && !Src1.isReg()) { in commuteInstructionImpl()
    2099  CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); in commuteInstructionImpl()
    2100  } else if (!Src0.isReg() && Src1.isReg()) { in commuteInstructionImpl()
    2101  if (isOperandLegal(MI, Src1Idx, &Src0)) in commuteInstructionImpl()
    2102  CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); in commuteInstructionImpl()
    2109  swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, in commuteInstructionImpl()
    [all …]
SIPeepholeSDWA.cpp
    537  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in matchSDWAOperand() local
    538  auto Imm = foldToImm(*Src0); in matchSDWAOperand()
    577  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in matchSDWAOperand() local
    578  auto Imm = foldToImm(*Src0); in matchSDWAOperand()
    645  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in matchSDWAOperand() local
    648  if (Src0->getReg().isPhysical() || Dst->getReg().isPhysical()) in matchSDWAOperand()
    652  Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64); in matchSDWAOperand()
    661  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in matchSDWAOperand() local
    664  auto Imm = foldToImm(*Src0); in matchSDWAOperand()
    668  ValSrc = Src0; in matchSDWAOperand()
    [all …]
SIOptimizeExecMasking.cpp
    428  MachineOperand &Src0 = SaveExecInst->getOperand(1); in runOnMachineFunction() local
    433  if (Src0.isReg() && Src0.getReg() == CopyFromExec) { in runOnMachineFunction()
    439  OtherOp = &Src0; in runOnMachineFunction()
AMDGPUPostLegalizerCombiner.cpp
    211  Register Src0; in matchCvtF32UByteN() local
    213  bool IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt))); in matchCvtF32UByteN()
    214  if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) { in matchCvtF32UByteN()
    223  MatchInfo.CvtVal = Src0; in matchCvtF32UByteN()
SIFixSGPRCopies.cpp
    720  MachineOperand &Src0 = MI.getOperand(Src0Idx); in runOnMachineFunction() local
    724  if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) && in runOnMachineFunction()
    725  Src0.getReg() != AMDGPU::M0) && in runOnMachineFunction()
    734  for (MachineOperand *MO : {&Src0, &Src1}) { in runOnMachineFunction()
SILoadStoreOptimizer.cpp
    1461  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); in mergeTBufferStorePair() local
    1465  .add(*Src0) in mergeTBufferStorePair()
    1616  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata); in mergeBufferStorePair() local
    1620  .add(*Src0) in mergeBufferStorePair()
    1790  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0); in processBaseWithConstOffset() local
    1793  auto Offset0P = extractConstOffset(*Src0); in processBaseWithConstOffset()
    1799  BaseLo = *Src0; in processBaseWithConstOffset()
    1802  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0); in processBaseWithConstOffset()
    1805  if (Src0->isImm()) in processBaseWithConstOffset()
    1806  std::swap(Src0, Src1); in processBaseWithConstOffset()
    [all …]
SIISelLowering.cpp
    3890  MachineOperand &Src0 = MI.getOperand(2); in EmitInstrWithCustomInserter() local
    3896  BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); in EmitInstrWithCustomInserter()
    3914  MachineOperand &Src0 = MI.getOperand(1); in EmitInstrWithCustomInserter() local
    3921  MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); in EmitInstrWithCustomInserter()
    3923  MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); in EmitInstrWithCustomInserter()
    3962  MachineOperand &Src0 = MI.getOperand(1); in EmitInstrWithCustomInserter() local
    3965  const TargetRegisterClass *Src0RC = Src0.isReg() in EmitInstrWithCustomInserter()
    3966  ? MRI.getRegClass(Src0.getReg()) in EmitInstrWithCustomInserter()
    3978  MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); in EmitInstrWithCustomInserter()
    3983  MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); in EmitInstrWithCustomInserter()
    [all …]
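
The EmitInstrWithCustomInserter cases above split a 64-bit source operand into its sub0/sub1 (low/high 32-bit) subregisters and emit a 32-bit opcode per half. The arithmetic shape of such an expansion, shown on plain integers as a sketch of the idea rather than the actual MIR rewriting:

#include <cstdint>

// 64-bit add decomposed into two 32-bit halves plus a carry, the way a
// split pseudo pairs a low add with a carry-consuming high add.
static uint64_t splitAdd64(uint64_t A, uint64_t B) {
  uint32_t ALo = uint32_t(A), AHi = uint32_t(A >> 32);
  uint32_t BLo = uint32_t(B), BHi = uint32_t(B >> 32);
  uint32_t Lo = ALo + BLo;
  uint32_t Carry = Lo < ALo; // carry out of the low half
  uint32_t Hi = AHi + BHi + Carry;
  return (uint64_t(Hi) << 32) | Lo;
}
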
AMDGPUInstructionSelector.cpp
    603  Register Src0 = MI.getOperand(1).getReg(); in selectG_BUILD_VECTOR_TRUNC() local
    605  if (MRI->getType(Src0) != S32) in selectG_BUILD_VECTOR_TRUNC()
    615  getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true); in selectG_BUILD_VECTOR_TRUNC()
    636  RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI); in selectG_BUILD_VECTOR_TRUNC()
    653  Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); in selectG_BUILD_VECTOR_TRUNC()
    763  Register Src0 = MI.getOperand(2).getReg(); in selectInterpP1F16() local
    767  !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) in selectInterpP1F16()
    790  .addReg(Src0) // $src0 in selectInterpP1F16()
    884  Register Src0 = ChooseDenom != 0 ? Numer : Denom; in selectDivScale() local
    889  .addUse(Src0) // $src0 in selectDivScale()
    [all …]
AMDGPUPromoteAlloca.cpp
    962  Value *Src0 = CI->getOperand(0); in handleAlloca() local
    963  Type *EltTy = Src0->getType()->getPointerElementType(); in handleAlloca()
GCNHazardRecognizer.cpp
    896  auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0); in fixVcmpxPermlaneHazards() local
    897  Register Reg = Src0->getReg(); in fixVcmpxPermlaneHazards()
    898  bool IsUndef = Src0->isUndef(); in fixVcmpxPermlaneHazards()
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/
AArch64AdvSIMDScalarPass.cpp
    298  unsigned Src0 = 0, SubReg0; in transformInstruction() local
    309  Src0 = MOSrc0->getReg(); in transformInstruction()
    341  if (!Src0) { in transformInstruction()
    343  Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass); in transformInstruction()
    344  insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0); in transformInstruction()
    363  .addReg(Src0, getKillRegState(KillSrc0), SubReg0) in transformInstruction()
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/Scalar/
InferAddressSpaces.cpp
    656  Constant *Src0 = CE->getOperand(1); in cloneConstantExprWithNewAddressSpace() local
    658  if (Src0->getType()->getPointerAddressSpace() == in cloneConstantExprWithNewAddressSpace()
    662  CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType), in cloneConstantExprWithNewAddressSpace()
    842  Value *Src0 = Op.getOperand(1); in updateAddressSpace() local
    845  auto I = InferredAddrSpace.find(Src0); in updateAddressSpace()
    847  I->second : Src0->getType()->getPointerAddressSpace(); in updateAddressSpace()
    853  auto *C0 = dyn_cast<Constant>(Src0); in updateAddressSpace()
ScalarizeMaskedMemIntrin.cpp
    150  Value *Src0 = CI->getArgOperand(3); in scalarizeMaskedLoad() local
    182  Value *VResult = Src0; in scalarizeMaskedLoad()
    420  Value *Src0 = CI->getArgOperand(3); in scalarizeMaskedGather() local
    434  Value *VResult = Src0; in scalarizeMaskedGather()
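
In both scalarizeMaskedLoad and scalarizeMaskedGather, Src0 is argument 3 of the intrinsic: the passthru vector. VResult starts out as that passthru, and only lanes whose mask bit is set are overwritten with loaded values. A plain C++ stand-in for those semantics (the element type and fixed size are arbitrary assumptions):

#include <array>
#include <cstddef>

// Lane-wise contract of llvm.masked.load after scalarization:
// Result[i] = Mask[i] ? Ptr[i] : Src0[i].
template <std::size_t N>
std::array<int, N> maskedLoadScalarized(const int *Ptr,
                                        const std::array<bool, N> &Mask,
                                        const std::array<int, N> &Src0) {
  std::array<int, N> Result = Src0; // passthru seeds the result
  for (std::size_t I = 0; I != N; ++I)
    if (Mask[I])
      Result[I] = Ptr[I]; // only enabled lanes touch memory
  return Result;
}
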
/netbsd-src/external/apache2/llvm/dist/llvm/lib/CodeGen/GlobalISel/
GISelKnownBits.cpp
    98   void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1, in computeKnownBitsMin() argument
    110  computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth); in computeKnownBitsMin()
    508  unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1, in computeNumSignBitsMin() argument
    515  return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits); in computeNumSignBitsMin()
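
computeKnownBitsMin computes known bits for each source and keeps only the bits known in both, which stays correct whichever source the instruction ultimately selects; computeNumSignBitsMin does the same with std::min over sign-bit counts. The intersection step, sketched directly on KnownBits (this mirrors the idea, not the exact implementation in the file):

#include "llvm/Support/KnownBits.h"

using namespace llvm;

// A bit is known in the result only if it is known, with the same
// value, in both inputs.
static KnownBits knownBitsCommon(const KnownBits &K0, const KnownBits &K1) {
  KnownBits Out(K0.getBitWidth());
  Out.Zero = K0.Zero & K1.Zero;
  Out.One = K0.One & K1.One;
  return Out;
}
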
CSEMIRBuilder.cpp
    199  const SrcOp &Src0 = SrcOps[0]; in buildInstr() local
    202  ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI())) in buildInstr()
/netbsd-src/external/apache2/llvm/dist/clang/lib/CodeGen/
CGBuiltin.cpp
    444  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitUnaryMaybeConstrainedFPBuiltin() local
    448  Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); in emitUnaryMaybeConstrainedFPBuiltin()
    449  return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); in emitUnaryMaybeConstrainedFPBuiltin()
    451  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitUnaryMaybeConstrainedFPBuiltin()
    452  return CGF.Builder.CreateCall(F, Src0); in emitUnaryMaybeConstrainedFPBuiltin()
    461  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitBinaryMaybeConstrainedFPBuiltin() local
    466  Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); in emitBinaryMaybeConstrainedFPBuiltin()
    467  return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); in emitBinaryMaybeConstrainedFPBuiltin()
    469  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitBinaryMaybeConstrainedFPBuiltin()
    470  return CGF.Builder.CreateCall(F, { Src0, Src1 }); in emitBinaryMaybeConstrainedFPBuiltin()
    [all …]
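
emitUnaryMaybeConstrainedFPBuiltin and its binary sibling emit either the constrained FP intrinsic (preserving rounding-mode and exception semantics) or the plain one, instantiated at Src0's type. A condensed sketch of the unary shape, using IRBuilder and Module directly in place of the CGF/CGM wrappers; the getIsFPConstrained() test is an assumption about the condition elided from the snippet:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitUnaryFPCall(IRBuilder<> &Builder, Module &M,
                              Intrinsic::ID PlainID,
                              Intrinsic::ID ConstrainedID, Value *Src0) {
  if (Builder.getIsFPConstrained()) {
    // Constrained form: rounding mode and FP exceptions are honored.
    Function *F =
        Intrinsic::getDeclaration(&M, ConstrainedID, Src0->getType());
    return Builder.CreateConstrainedFPCall(F, {Src0});
  }
  Function *F = Intrinsic::getDeclaration(&M, PlainID, Src0->getType());
  return Builder.CreateCall(F, {Src0});
}
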
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86ISelDAGToDAG.cpp
    4386  SDValue Src0 = N0; in tryVPTESTM() local
    4397  Src0 = N0Temp.getOperand(0); in tryVPTESTM()
    4435  bool CanFoldLoads = Src0 != Src1; in tryVPTESTM()
    4444  FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1, in tryVPTESTM()
    4447  std::swap(Src0, Src1); in tryVPTESTM()
    4468  Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0); in tryVPTESTM()
    4491  SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, in tryVPTESTM()
    4495  SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, in tryVPTESTM()
    4506  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1); in tryVPTESTM()
    4508  CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1); in tryVPTESTM()