/openbsd-src/gnu/llvm/llvm/lib/Target/AMDGPU/
AMDGPUCallingConv.td
    135  (add (sequence "VGPR%u", 40, 47),
    136  (sequence "VGPR%u", 56, 63),
    137  (sequence "VGPR%u", 72, 79),
    138  (sequence "VGPR%u", 88, 95),
    139  (sequence "VGPR%u", 104, 111),
    140  (sequence "VGPR%u", 120, 127),
    141  (sequence "VGPR%u", 136, 143),
    142  (sequence "VGPR%u", 152, 159),
    143  (sequence "VGPR%u", 168, 175),
    144  (sequence "VGPR%u", 184, 191),
    [all …]
|
SIRegisterInfo.td
    163  // of the result or reading just 16 bits of a 32 bit VGPR.
    165  // Non-VGPR register classes use it as we need to have matching subregisters
    354  // VGPR registers
    356  defm VGPR#Index :
    413  // Give all SGPR classes higher priority than VGPR classes, because
    598  (add (sequence "VGPR%u_LO16", 0, 255))> {
    606  (add (sequence "VGPR%u_HI16", 0, 255))> {
    613  // VGPR 32-bit registers
    616  (add (sequence "VGPR%u", 0, 255))> {
    625  (add (sequence "VGPR%u", 0, 127))> {
    [all …]
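
The TableGen sequence operator in the hits above expands a printf-style name template over an inclusive index range, so (add (sequence "VGPR%u", 0, 255)) collects VGPR0 through VGPR255 into one register class. A minimal standalone C++ sketch of that expansion, purely illustrative (the function name and presentation are assumptions, this is not TableGen itself):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Expand something like (sequence "VGPR%u", Lo, Hi) into concrete names.
    std::vector<std::string> expandSequence(const char *Fmt, unsigned Lo, unsigned Hi) {
      std::vector<std::string> Names;
      for (unsigned I = Lo; I <= Hi; ++I) {
        char Buf[32];
        std::snprintf(Buf, sizeof(Buf), Fmt, I);
        Names.emplace_back(Buf);
      }
      return Names;
    }

    int main() {
      // The 32-bit VGPR class in the snippet above is built from VGPR0..VGPR255.
      auto Names = expandSequence("VGPR%u", 0, 255);
      std::printf("%s .. %s (%zu registers)\n", Names.front().c_str(),
                  Names.back().c_str(), Names.size());
    }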
|
SIRegisterInfo.h
    55   Register VGPR;                                        (member)
    59   SpilledReg(Register R, int L) : VGPR(R), Lane(L) {}   (in SpilledReg())
    62   bool hasReg() { return VGPR != 0; }                   (in hasReg())
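
SpilledReg, as the hits above show, describes one spilled scalar value by naming the carrier VGPR and the lane inside it. A minimal standalone sketch of that pairing, assuming Register is simplified to an unsigned id (the real type is llvm::Register, where 0 likewise means "no register"):

    #include <cstdio>

    using Register = unsigned; // simplified stand-in for llvm::Register; 0 = no register

    struct SpilledReg {
      Register VGPR = 0; // VGPR that carries the spilled value
      int Lane = -1;     // lane of that VGPR used for the spill
      SpilledReg() = default;
      SpilledReg(Register R, int L) : VGPR(R), Lane(L) {}
      bool hasReg() const { return VGPR != 0; }
    };

    int main() {
      SpilledReg S(/*hypothetical VGPR id*/ 42, /*lane*/ 3);
      if (S.hasReg())
        std::printf("spilled to VGPR id %u, lane %d\n", S.VGPR, S.Lane);
    }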
|
AMDGPURegisterBanks.td
    13   def VGPRRegBank : RegisterBank<"VGPR",
|
SIFrameLowering.cpp
    99     << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane << '\n';);   (in getVGPRSpillLaneOrTempRegister())
    268    BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill[I].VGPR)   (in saveToVGPRLane())
    271    .addReg(Spill[I].VGPR, RegState::Undef);   (in saveToVGPRLane())
    314    .addReg(Spill[I].VGPR)   (in restoreFromVGPRLane())
    927    Register VGPR = Reg.first;   (in emitCSRSpillStores(), local)
    930    VGPR, FI, FrameReg);   (in emitCSRSpillStores())
    1032   Register VGPR = Reg.first;   (in emitCSRSpillRestores(), local)
    1035   VGPR, FI, FrameReg);   (in emitCSRSpillRestores())
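
The saveToVGPRLane hits build V_WRITELANE_B32 instructions that park a 32-bit scalar in one lane of a carrier VGPR at spill time; the matching restore path reads the same lane back. A rough standalone model of that lane bookkeeping (plain C++, not the LLVM MachineInstr API; the wave32 size and the helper names are assumptions):

    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr int WaveSize = 32; // assumed wave32; wave64 hardware would use 64 lanes

    // A VGPR holds one 32-bit value per lane of the wave.
    using Vgpr = std::array<uint32_t, WaveSize>;

    // Model of v_writelane_b32: place a scalar value into a single lane.
    void writeLane(Vgpr &V, int Lane, uint32_t SgprValue) {
      assert(Lane >= 0 && Lane < WaveSize);
      V[Lane] = SgprValue;
    }

    // Model of the matching read: fetch that lane back when restoring.
    uint32_t readLane(const Vgpr &V, int Lane) {
      assert(Lane >= 0 && Lane < WaveSize);
      return V[Lane];
    }

    int main() {
      Vgpr Carrier{};                     // the VGPR reserved for SGPR spills
      writeLane(Carrier, 5, 0xdeadbeefu); // spill an SGPR value to lane 5
      std::printf("restored: 0x%x\n", (unsigned)readLane(Carrier, 5));
    }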
|
SIMachineFunctionInfo.cpp
    278   void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR,   (in allocateWWMSpill(), argument)
    281   if (isEntryFunction() || WWMSpills.count(VGPR))   (in allocateWWMSpill())
    285   VGPR, MF.getFrameInfo().CreateSpillStackObject(Size, Alignment)));   (in allocateWWMSpill())
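
allocateWWMSpill in the hits above records one spill stack slot per WWM-reserved VGPR, skipping entry functions and VGPRs that already have a slot. A stripped-down standalone model of that bookkeeping (std::map standing in for the real container, frame indices as plain ints; every name here is an assumption, not the SIMachineFunctionInfo API):

    #include <cstdio>
    #include <map>

    using Register = unsigned; // simplified stand-in for llvm::Register
    using FrameIndex = int;

    struct WWMSpillTracker {
      std::map<Register, FrameIndex> WWMSpills; // VGPR -> its spill slot
      int NextFrameIndex = 0;                   // fake frame-index allocator
      bool IsEntryFunction = false;

      void allocateWWMSpill(Register VGPR) {
        // Entry functions don't need these slots, and each VGPR gets at most one.
        if (IsEntryFunction || WWMSpills.count(VGPR))
          return;
        WWMSpills.insert({VGPR, NextFrameIndex++});
      }
    };

    int main() {
      WWMSpillTracker T;
      T.allocateWWMSpill(7);
      T.allocateWWMSpill(7); // second request for the same VGPR is a no-op
      std::printf("slots allocated: %zu\n", T.WWMSpills.size());
    }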
|
SIInstrInfo.td
    83    SDTCisVT<2, i32>,   // vindex(VGPR)
    84    SDTCisVT<3, i32>,   // voffset(VGPR)
    101   SDTCisVT<2, i32>,   // vindex(VGPR)
    102   SDTCisVT<3, i32>,   // voffset(VGPR)
    119   SDTCisVT<2, i32>,   // vindex(VGPR)
    120   SDTCisVT<3, i32>,   // voffset(VGPR)
    147   SDTCisVT<2, i32>,   // vindex(VGPR)
    148   SDTCisVT<3, i32>,   // voffset(VGPR)
    172   SDTCisVT<3, i32>,   // vindex(VGPR)
    173   SDTCisVT<4, i32>,   // voffset(VGPR)
    [all …]
|
AMDGPUGenRegisterBankInfo.def
    70   {0, 1, VGPRRegBank},   // VGPR begin
|
SIPeepholeSDWA.cpp
    1172  Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);   (in legalizeScalarOperands(), local)
    1174  TII->get(AMDGPU::V_MOV_B32_e32), VGPR);   (in legalizeScalarOperands())
    1180  Op.ChangeToRegister(VGPR, false);   (in legalizeScalarOperands())
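
The legalizeScalarOperands hits show the usual fix-up when an operand is not allowed to stay scalar: allocate a fresh virtual VGPR, emit a V_MOV_B32 copying the scalar or immediate into it, then retarget the instruction's operand at that VGPR. A loose standalone model of the rewrite step (plain C++; the Operand struct and helper name are assumptions, not the LLVM MachineOperand API):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Extremely simplified operand: either a register name or an immediate.
    struct Operand {
      bool IsReg = false;
      std::string Reg;   // e.g. "s5" or "%vgpr0"
      long Imm = 0;
    };

    static unsigned NextVirtVgpr = 0;

    // Mimic "create a virtual VGPR, copy the scalar into it, retarget the operand".
    std::string legalizeToVGPR(Operand &Op, std::vector<std::string> &EmittedMIs) {
      std::string VGPR = "%vgpr" + std::to_string(NextVirtVgpr++);
      std::string Src = Op.IsReg ? Op.Reg : std::to_string(Op.Imm);
      EmittedMIs.push_back("v_mov_b32 " + VGPR + ", " + Src); // the inserted copy
      Op = Operand{true, VGPR, 0};                            // ChangeToRegister analogue
      return VGPR;
    }

    int main() {
      std::vector<std::string> MIs;
      Operand ScalarOp{true, "s5", 0}; // an SGPR operand the SDWA form cannot take directly
      legalizeToVGPR(ScalarOp, MIs);
      std::printf("%s ; operand now %s\n", MIs.back().c_str(), ScalarOp.Reg.c_str());
    }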
|
/openbsd-src/gnu/llvm/llvm/include/llvm/IR/
IntrinsicsAMDGPU.td
    506   // The pointer argument is assumed to be dynamically uniform if a VGPR.
    799   P_.RetTypes,                            // vdata(VGPR) -- for load/atomic-with-return
    801   !foreach(arg, P_.DataArgs, arg.Type),   // vdata(VGPR) -- for store/atomic
    803   P_.AddrTypes,                           // vaddr(VGPR)
    998   llvm_i32_ty,   // vindex(VGPR)
    999   llvm_i32_ty,   // offset(SGPR/VGPR/imm)
    1019  [data_ty,      // vdata(VGPR)
    1021  llvm_i32_ty,   // vindex(VGPR)
    1022  llvm_i32_ty,   // offset(SGPR/VGPR/imm)
    1040  llvm_i32_ty,   // offset(VGPR/imm, included in bounds checking and swizzling)
    [all …]
|
/openbsd-src/gnu/llvm/llvm/docs/AMDGPU/
gfx1030_vaddr_a5639c.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx1013_vaddr_a5639c.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx10_vaddr_a5639c.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx11_vaddr_a5639c.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx1030_vaddr_c5ab43.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx1013_vaddr_c5ab43.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…

gfx11_vaddr_0bfea4.rst
    15   …and may be specified using either :ref:`standard VGPR syntax<amdgpu_synid_v>` or special :ref:`NSA…
|
gfx8_hwreg.rst
    51   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx7_hwreg.rst
    51   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx90a_hwreg.rst
    51   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx9_hwreg.rst
    51   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx11_hwreg.rst
    52   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx1030_hwreg.rst
    52   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx10_hwreg.rst
    52   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.

gfx940_hwreg.rst
    51   HW_REG_GPR_ALLOC    Per-wave SGPR and VGPR allocation.
|