
Searched refs:AMDGPU (Results 1 – 25 of 221) sorted by relevance


/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
SIRegisterInfo.cpp
88 Register TmpVGPR = AMDGPU::NoRegister;
94 Register SavedExecReg = AMDGPU::NoRegister;
123 ExecReg = AMDGPU::EXEC_LO; in SGPRSpillBuilder()
124 MovOpc = AMDGPU::S_MOV_B32; in SGPRSpillBuilder()
125 NotOpc = AMDGPU::S_NOT_B32; in SGPRSpillBuilder()
127 ExecReg = AMDGPU::EXEC; in SGPRSpillBuilder()
128 MovOpc = AMDGPU::S_MOV_B64; in SGPRSpillBuilder()
129 NotOpc = AMDGPU::S_NOT_B64; in SGPRSpillBuilder()
132 assert(SuperReg != AMDGPU::M0 && "m0 should never spill"); in SGPRSpillBuilder()
133 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI && in SGPRSpillBuilder()
[all …]
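
The SGPRSpillBuilder snippet above picks the exec register and the scalar mov/not opcodes from the wavefront size. A minimal sketch of that selection, assuming the usual lib/Target/AMDGPU headers (GCNSubtarget and the generated AMDGPU:: enums) are in scope; ExecOps and pickExecOps are illustrative names, not part of the real spill builder:

// Wave32 kernels use the 32-bit exec_lo and 32-bit scalar ops;
// wave64 kernels use the full 64-bit exec mask and 64-bit scalar ops.
struct ExecOps {
  Register ExecReg;
  unsigned MovOpc;
  unsigned NotOpc;
};

static ExecOps pickExecOps(const GCNSubtarget &ST) {
  ExecOps Ops;
  if (ST.isWave32()) {
    Ops.ExecReg = AMDGPU::EXEC_LO;
    Ops.MovOpc = AMDGPU::S_MOV_B32;
    Ops.NotOpc = AMDGPU::S_NOT_B32;
  } else {
    Ops.ExecReg = AMDGPU::EXEC;
    Ops.MovOpc = AMDGPU::S_MOV_B64;
    Ops.NotOpc = AMDGPU::S_NOT_B64;
  }
  return Ops;
}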
SIOptimizeExecMasking.cpp
57 case AMDGPU::COPY: in isCopyFromExec()
58 case AMDGPU::S_MOV_B64: in isCopyFromExec()
59 case AMDGPU::S_MOV_B64_term: in isCopyFromExec()
60 case AMDGPU::S_MOV_B32: in isCopyFromExec()
61 case AMDGPU::S_MOV_B32_term: { in isCopyFromExec()
64 Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC)) in isCopyFromExec()
69 return AMDGPU::NoRegister; in isCopyFromExec()
75 case AMDGPU::COPY: in isCopyToExec()
76 case AMDGPU::S_MOV_B64: in isCopyToExec()
77 case AMDGPU::S_MOV_B32: { in isCopyToExec()
[all …]
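
The isCopyFromExec() cases above match moves whose source is the exec mask and return the copied-to register, or AMDGPU::NoRegister when nothing matches. A sketch of that shape (matchCopyFromExec is an illustrative name; operand positions dst=0, src=1 are assumed for these mov forms):

// Return the register that receives a copy of exec, or NoRegister.
static Register matchCopyFromExec(const MachineInstr &MI,
                                  const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B32_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() &&
        Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
      return MI.getOperand(0).getReg();
    break;
  }
  }
  return AMDGPU::NoRegister;
}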
AMDGPURegisterBankInfo.cpp
117 if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT || in applyBank()
118 Opc == AMDGPU::G_SEXT) { in applyBank()
125 if (SrcBank == &AMDGPU::VCCRegBank) { in applyBank()
129 assert(NewBank == &AMDGPU::VGPRRegBank); in applyBank()
134 auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1); in applyBank()
148 if (Opc == AMDGPU::G_TRUNC) { in applyBank()
151 assert(DstBank != &AMDGPU::VCCRegBank); in applyBank()
166 assert(NewBank == &AMDGPU::VGPRRegBank && in applyBank()
168 assert((MI.getOpcode() != AMDGPU::G_TRUNC && in applyBank()
169 MI.getOpcode() != AMDGPU::G_ANYEXT) && in applyBank()
[all …]
SIInstrInfo.cpp
42 namespace AMDGPU { namespace
65 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), in SIInstrInfo()
87 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); in nodesHaveSameOperandValue()
88 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); in nodesHaveSameOperandValue()
114 case AMDGPU::V_MOV_B32_e32: in isReallyTriviallyReMaterializable()
115 case AMDGPU::V_MOV_B32_e64: in isReallyTriviallyReMaterializable()
116 case AMDGPU::V_MOV_B64_PSEUDO: in isReallyTriviallyReMaterializable()
117 case AMDGPU::V_ACCVGPR_READ_B32_e64: in isReallyTriviallyReMaterializable()
118 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: in isReallyTriviallyReMaterializable()
155 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
[all …]
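
Several of the SIInstrInfo lines rely on AMDGPU::getNamedOperandIdx, which maps an opcode plus a named operand (AMDGPU::OpName::*) to an operand index, or -1 when that opcode has no such operand. A small usage sketch (the helper name is made up for illustration):

// True only when both opcodes carry an 'offset' operand.
static bool bothHaveOffsetOperand(unsigned Opc0, unsigned Opc1) {
  int Idx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
  int Idx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
  return Idx0 != -1 && Idx1 != -1;
}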
SIPeepholeSDWA.cpp
112 using namespace AMDGPU::SDWA;
308 if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) { in getSrcMods()
309 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) { in getSrcMods()
312 } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) { in getSrcMods()
313 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) { in getSrcMods()
343 MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in convertToSDWA()
344 MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel); in convertToSDWA()
346 TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); in convertToSDWA()
350 Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in convertToSDWA()
351 SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel); in convertToSDWA()
[all …]
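
The SDWA peephole reads operands through TII->getNamedOperand, which returns a pointer to the named operand of a concrete instruction, or nullptr when the instruction does not carry it. A hedged sketch of that access pattern (the helper and its default of 0 are illustrative):

// Read src0_modifiers if present; treat a missing operand as "no modifiers".
static int64_t getSrc0ModsOrZero(const SIInstrInfo *TII,
                                 const MachineInstr &MI) {
  if (const MachineOperand *Mod =
          TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    return Mod->getImm();
  return 0;
}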
GCNDPPCombine.cpp
126 if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) { in isShrinkable()
135 if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) || in isShrinkable()
136 !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) || in isShrinkable()
137 !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) || in isShrinkable()
138 !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0)) { in isShrinkable()
146 auto DPP32 = AMDGPU::getDPPOp32(Op); in getDPPOp()
149 auto E32 = AMDGPU::getVOPe32(Op); in getDPPOp()
150 DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32); in getDPPOp()
166 case AMDGPU::IMPLICIT_DEF: in getOldOpndValue()
168 case AMDGPU::COPY: in getOldOpndValue()
[all …]
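
getDPPOp() above first asks for a direct DPP form of the opcode and, failing that, goes through the 32-bit VOP encoding. A sketch of that two-step lookup using the same generated helpers shown in the snippet (the wrapper name is illustrative):

// -1 means "no DPP form exists for this opcode".
static int lookupDPPOpcode(unsigned Op) {
  int DPP32 = AMDGPU::getDPPOp32(Op);
  if (DPP32 == -1) {
    int E32 = AMDGPU::getVOPe32(Op);
    DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);
  }
  return DPP32;
}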
SIFoldOperands.cpp
132 case AMDGPU::V_MAC_F32_e64: in macToMad()
133 return AMDGPU::V_MAD_F32_e64; in macToMad()
134 case AMDGPU::V_MAC_F16_e64: in macToMad()
135 return AMDGPU::V_MAD_F16_e64; in macToMad()
136 case AMDGPU::V_FMAC_F32_e64: in macToMad()
137 return AMDGPU::V_FMA_F32_e64; in macToMad()
138 case AMDGPU::V_FMAC_F16_e64: in macToMad()
139 return AMDGPU::V_FMA_F16_gfx9_e64; in macToMad()
140 case AMDGPU::V_FMAC_LEGACY_F32_e64: in macToMad()
141 return AMDGPU::V_FMA_LEGACY_F32_e64; in macToMad()
[all …]
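
The macToMad() cases above form a simple opcode-to-opcode table: each two-address MAC/FMAC form maps to its three-address MAD/FMA counterpart. A sketch of that mapping with the cases taken from the snippet (returning INSTRUCTION_LIST_END for "no mapping" is this sketch's assumption):

static unsigned macToMadSketch(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:         return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_F16_e64:         return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_FMAC_F32_e64:        return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F16_e64:        return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e64: return AMDGPU::V_FMA_LEGACY_F32_e64;
  }
  return AMDGPU::INSTRUCTION_LIST_END; // no MAD/FMA equivalent
}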
AMDGPUInstructionSelector.cpp
88 return RB->getID() == AMDGPU::VCCRegBankID; in isVCC()
95 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); in constrainCopyLikeIntrin()
126 if (SrcReg == AMDGPU::SCC) { in selectCOPY()
146 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; in selectCOPY()
157 TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; in selectCOPY()
161 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) in selectCOPY()
243 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) in getSubOperand64()
259 case AMDGPU::sub0: in getSubOperand64()
261 case AMDGPU::sub1: in getSubOperand64()
268 case AMDGPU::G_AND: in getLogicalBitOpcode()
[all …]
SILoadStoreOptimizer.cpp
288 return AMDGPU::getMUBUFElements(Opc); in getOpcodeWidth()
292 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm(); in getOpcodeWidth()
296 return AMDGPU::getMTBUFElements(Opc); in getOpcodeWidth()
300 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: in getOpcodeWidth()
302 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM: in getOpcodeWidth()
304 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM: in getOpcodeWidth()
306 case AMDGPU::DS_READ_B32: LLVM_FALLTHROUGH; in getOpcodeWidth()
307 case AMDGPU::DS_READ_B32_gfx9: LLVM_FALLTHROUGH; in getOpcodeWidth()
308 case AMDGPU::DS_WRITE_B32: LLVM_FALLTHROUGH; in getOpcodeWidth()
309 case AMDGPU::DS_WRITE_B32_gfx9: in getOpcodeWidth()
[all …]
GCNHazardRecognizer.cpp
35 MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5; in GCNHazardRecognizer()
52 return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64; in isDivFMas()
56 return Opcode == AMDGPU::S_GETREG_B32; in isSGetReg()
61 case AMDGPU::S_SETREG_B32: in isSSetReg()
62 case AMDGPU::S_SETREG_B32_mode: in isSSetReg()
63 case AMDGPU::S_SETREG_IMM32_B32: in isSSetReg()
64 case AMDGPU::S_SETREG_IMM32_B32_mode: in isSSetReg()
71 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32; in isRWLane()
75 return Opcode == AMDGPU::S_RFE_B64; in isRFE()
80 case AMDGPU::S_MOVRELS_B32: in isSMovRel()
[all …]
SIShrinkInstructions.cpp
66 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); in foldImmediates()
186 int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode()); in shrinkScalarCompare()
192 if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) { in shrinkScalarCompare()
196 SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? in shrinkScalarCompare()
197 AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32; in shrinkScalarCompare()
216 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode()); in shrinkMIMG()
217 if (!Info || Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA) in shrinkMIMG()
225 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0); in shrinkMIMG()
230 RC = &AMDGPU::VReg_64RegClass; in shrinkMIMG()
232 RC = &AMDGPU::VReg_96RegClass; in shrinkMIMG()
[all …]
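
shrinkMIMG() above only proceeds for GFX10 NSA-encoded MIMG instructions; AMDGPU::getMIMGInfo returns null for opcodes that are not MIMG at all. A sketch of that gate (the predicate name is illustrative):

static bool isGfx10NSAMIMG(unsigned Opc) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
  return Info && Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
}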
AMDGPUArgumentUsageInfo.cpp
95 &AMDGPU::SGPR_128RegClass, LLT::vector(4, 32)); in getPreloadedValue()
99 &AMDGPU::SGPR_64RegClass, in getPreloadedValue()
103 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
106 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
109 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
113 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
116 &AMDGPU::SGPR_64RegClass, in getPreloadedValue()
120 &AMDGPU::SGPR_64RegClass, in getPreloadedValue()
124 &AMDGPU::SGPR_64RegClass, LLT::scalar(64)); in getPreloadedValue()
127 &AMDGPU::SGPR_64RegClass, LLT::scalar(64)); in getPreloadedValue()
[all …]
AMDGPUAsmPrinter.cpp
40 using namespace llvm::AMDGPU;
78 static uint32_t getFPMode(AMDGPU::SIModeRegisterDefaults Mode) { in getFPMode()
188 return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64); in isBlockOnlyReachableByFallthrough()
354 if ((AMDGPU::isGFX10Plus(STI) || AMDGPU::isGFX90A(STI)) && in doFinalization()
725 Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) || in analyzeResourceUsage()
726 MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI) || in analyzeResourceUsage()
737 (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) && in analyzeResourceUsage()
738 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) && in analyzeResourceUsage()
739 !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) { in analyzeResourceUsage()
753 Info.UsesVCC = MRI.isPhysRegUsed(AMDGPU::VCC_LO) || in analyzeResourceUsage()
[all …]
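
analyzeResourceUsage() decides which hardware resources a function touches by asking MachineRegisterInfo whether specific physical registers were ever used. A minimal sketch of that query style (the VCC_HI half is my assumption about the elided continuation, and usesVCC is an illustrative name):

static bool usesVCC(const MachineRegisterInfo &MRI) {
  return MRI.isPhysRegUsed(AMDGPU::VCC_LO) ||
         MRI.isPhysRegUsed(AMDGPU::VCC_HI);
}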
SIInsertWaitcnts.cpp
159 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode()); in getVmemType()
160 return AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler in getVmemType()
165 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) { in addWait()
249 void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
252 AMDGPU::Waitcnt &Wait) const;
253 void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
361 AMDGPU::IsaVersion IV;
448 AMDGPU::Waitcnt &Wait, const MachineInstr *MI);
468 unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST)); in getRegInterval()
525 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr); in updateByEvent()
[all …]
SIOptimizeExecMaskingPreRA.cpp
127 return Opc == AMDGPU::S_CBRANCH_VCCZ || in optimizeVcndVcmpPair()
128 Opc == AMDGPU::S_CBRANCH_VCCNZ; }); in optimizeVcndVcmpPair()
133 TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister, *I, *MRI, LIS); in optimizeVcndVcmpPair()
150 if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 || in optimizeVcndVcmpPair()
151 Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) || in optimizeVcndVcmpPair()
155 MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0); in optimizeVcndVcmpPair()
156 MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1); in optimizeVcndVcmpPair()
164 if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64) in optimizeVcndVcmpPair()
167 if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) || in optimizeVcndVcmpPair()
168 TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers)) in optimizeVcndVcmpPair()
[all …]
SILowerControlFlow.cpp
160 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef()); in setImpSCCDefDead()
192 U->getOpcode() != AMDGPU::SI_END_CF) in isSimpleIf()
204 assert(Cond.getSubReg() == AMDGPU::NoSubRegister); in emitIf()
207 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef()); in emitIf()
226 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg) in emitIf()
261 MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ)) in emitIf()
280 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); in emitIf()
331 BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ)) in emitElse()
352 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); in emitElse()
410 BuildMI(MBB, BranchPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) in emitLoop()
[all …]
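
SILowerControlFlow emits its exec-mask branches with BuildMI, e.g. the S_CBRANCH_EXECZ that skips an 'if' body when no lanes are active. A hedged sketch of that emission (emitExeczSkip and its parameter list are illustrative, not the pass's interface):

static MachineInstr *emitExeczSkip(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL,
                                   const SIInstrInfo *TII,
                                   MachineBasicBlock *Target) {
  // Branch to Target if EXEC is zero; fall through otherwise.
  return BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
      .addMBB(Target);
}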
SIWholeQuadMode.cpp
441 case AMDGPU::EXEC: in markOperand()
442 case AMDGPU::EXEC_LO: in markOperand()
464 markDefs(MI, LR, *RegUnit, AMDGPU::NoSubRegister, Flag, Worklist); in markOperand()
516 } else if (Opcode == AMDGPU::WQM) { in scanInstructions()
521 } else if (Opcode == AMDGPU::SOFT_WQM) { in scanInstructions()
525 } else if (Opcode == AMDGPU::STRICT_WWM) { in scanInstructions()
533 } else if (Opcode == AMDGPU::STRICT_WQM) { in scanInstructions()
541 } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 || in scanInstructions()
542 Opcode == AMDGPU::V_SET_INACTIVE_B64) { in scanInstructions()
564 if (Opcode == AMDGPU::SI_PS_LIVE || Opcode == AMDGPU::SI_LIVE_MASK) { in scanInstructions()
[all …]
SIFrameLowering.cpp
86 MF.getRegInfo(), LiveRegs, AMDGPU::SReg_32_XM0_XEXECRegClass, true); in getVGPRSpillLaneOrTempRegister()
124 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR in buildPrologSpill()
125 : AMDGPU::BUFFER_STORE_DWORD_OFFSET; in buildPrologSpill()
146 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR in buildEpilogRestore()
147 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; in buildEpilogRestore()
165 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32); in buildGitPtr()
166 Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0); in buildGitPtr()
167 Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1); in buildGitPtr()
174 const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64); in buildGitPtr()
214 Register FlatScrInit = AMDGPU::NoRegister; in emitEntryFunctionFlatScratchInit()
[all …]
R600RegisterInfo.td
3 let Namespace = "AMDGPU";
16 let Namespace = "AMDGPU";
22 let Namespace = "AMDGPU";
31 let Namespace = "AMDGPU";
152 def R600_ArrayBase : RegisterClass <"AMDGPU", [f32, i32], 32,
165 def R600_Addr : RegisterClass <"AMDGPU", [i32], 32, (add (sequence "Addr%u_X", 0, 127))>;
170 def R600_Addr_Y : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Y)>;
171 def R600_Addr_Z : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Z)>;
172 def R600_Addr_W : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_W)>;
174 def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
[all …]
SIPreEmitPeephole.cpp
82 const unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; in optimizeVccBranch()
83 const unsigned And = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; in optimizeVccBranch()
84 const unsigned AndN2 = IsWave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64; in optimizeVccBranch()
85 const unsigned Mov = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; in optimizeVccBranch()
152 if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC)) { in optimizeVccBranch()
167 bool IsVCCZ = MI.getOpcode() == AMDGPU::S_CBRANCH_VCCZ; in optimizeVccBranch()
174 MI.setDesc(TII->get(AMDGPU::S_BRANCH)); in optimizeVccBranch()
202 MI.setDesc(TII->get(AMDGPU::S_BRANCH)); in optimizeVccBranch()
213 TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ : AMDGPU::S_CBRANCH_EXECNZ)); in optimizeVccBranch()
227 MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in optimizeSetGPR()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/AsmParser/
AMDGPUAsmParser.cpp
39 using namespace llvm::AMDGPU;
254 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16); in isRegOrImmWithInt16InputMods()
258 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32); in isRegOrImmWithInt32InputMods()
262 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64); in isRegOrImmWithInt64InputMods()
266 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16); in isRegOrImmWithFP16InputMods()
270 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32); in isRegOrImmWithFP32InputMods()
274 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64); in isRegOrImmWithFP64InputMods()
278 return isRegClass(AMDGPU::VGPR_32RegClassID) || in isVReg()
279 isRegClass(AMDGPU::VReg_64RegClassID) || in isVReg()
280 isRegClass(AMDGPU::VReg_96RegClassID) || in isVReg()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/MCTargetDesc/
SIMCCodeEmitter.cpp
135 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit16Encoding()
171 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit32Encoding()
207 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) in getLit64Encoding()
234 case AMDGPU::OPERAND_REG_IMM_INT32: in getLitEncoding()
235 case AMDGPU::OPERAND_REG_IMM_FP32: in getLitEncoding()
236 case AMDGPU::OPERAND_REG_INLINE_C_INT32: in getLitEncoding()
237 case AMDGPU::OPERAND_REG_INLINE_C_FP32: in getLitEncoding()
238 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: in getLitEncoding()
239 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: in getLitEncoding()
240 case AMDGPU::OPERAND_REG_IMM_V2INT32: in getLitEncoding()
[all …]
AMDGPUInstPrinter.cpp
25 using namespace llvm::AMDGPU;
155 if (AMDGPU::isGFX10Plus(STI)) { in printFlatOffset()
212 if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI)) in printCPol()
214 if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI)) in printCPol()
242 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim); in printDim()
261 if (STI.hasFeature(AMDGPU::FeatureR128A16)) in printR128A16()
302 using namespace llvm::AMDGPU::MTBUFFormat; in printSymbolicFormat()
305 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format); in printSymbolicFormat()
309 if (AMDGPU::isGFX10Plus(STI)) { in printSymbolicFormat()
345 case AMDGPU::FP_REG: in printRegOperand()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp
36 (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
37 : AMDGPU::EncValues::SGPR_MAX_SI)
48 if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10Plus()) in AMDGPUDisassembler()
62 int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx); in insertNamedMCOperand()
276 unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0); in IsAGPROperand()
278 return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255; in IsAGPROperand()
296 uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 in decodeOperand_AVLdSt_Any()
297 : AMDGPU::OpName::vdata; in decodeOperand_AVLdSt_Any()
299 int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx); in decodeOperand_AVLdSt_Any()
301 int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); in decodeOperand_AVLdSt_Any()
[all …]
/netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/Utils/
AMDGPUBaseInfo.cpp
89 namespace AMDGPU { namespace
684 STI->getFeatureBits().test(AMDGPU::FeatureXNACK)); in getNumExtraSGPRs()
826 if (AMDGPU::isGFX90A(*STI)) { in getDefaultAmdhsaKernelDescriptor()
1021 IdSymbolic[Id] && (Id != ID_XNACK_MASK || !AMDGPU::isGFX10_BEncoding(STI)); in isValidHwreg()
1397 return STI.getFeatureBits()[AMDGPU::FeatureXNACK]; in hasXNACK()
1401 return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC]; in hasSRAMECC()
1405 …return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128… in hasMIMG_R128()
1409 return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16]; in hasGFX10A16()
1413 return STI.getFeatureBits()[AMDGPU::FeatureG16]; in hasG16()
1417 return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]; in hasPackedD16()
[all …]
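
The AMDGPUBaseInfo helpers shown here are thin wrappers that each test one subtarget feature bit. A sketch of that style, mirroring the hasXNACK() line above (the name is suffixed to avoid claiming it is the real function):

static bool hasXNACKSketch(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}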
