| /freebsd-src/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
| AMDGPUCallingConv.td | 137 (add (sequence "VGPR%u", 40, 47), 138 (sequence "VGPR%u", 56, 63), 139 (sequence "VGPR%u", 72, 79), 140 (sequence "VGPR%u", 88, 95), 141 (sequence "VGPR%u", 104, 111), 142 (sequence "VGPR%u", 120, 127), 143 (sequence "VGPR%u", 136, 143), 144 (sequence "VGPR%u", 152, 159), 145 (sequence "VGPR%u", 168, 175), 146 (sequence "VGPR… |
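The TableGen helper (sequence "VGPR%u", Lo, Hi) expands to the registers VGPRLo through VGPRHi inclusive, so the calling-convention list above strings together blocks of eight VGPRs spaced sixteen registers apart, starting at VGPR40. A throwaway sketch (plain C++, not TableGen, covering only the blocks visible above) that prints the same expansion:

#include <cstdio>

int main() {
  // Blocks of eight VGPRs every sixteen registers: 40-47, 56-63, ..., 168-175,
  // matching the visible part of the AMDGPUCallingConv.td list above.
  for (int Base = 40; Base <= 168; Base += 16)
    for (int I = Base; I < Base + 8; ++I)
      std::printf("VGPR%d\n", I);
  return 0;
}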
| SIRegisterInfo.td | 348 // VGPR registers 350 defm VGPR#Index : 407 // Give all SGPR classes higher priority than VGPR classes, because 596 (add (interleave (sequence "VGPR%u_LO16", 0, 255), 597 (sequence "VGPR%u_HI16", 0, 255)))> { 602 // This is the base class for VGPR{128..255}_{LO16,HI16}. 610 (add (interleave (sequence "VGPR%u_LO16", 0, 127), 611 (sequence "VGPR%u_HI16", 0, 127)))> { 616 // This is the base class for VGPR{0..127}_{LO16,HI16}. 620 // VGPR 3… |
| SIRegisterInfo.h | 57 Register VGPR; member 61 SpilledReg(Register R, int L) : VGPR(R), Lane(L) {} in SpilledReg() 64 bool hasReg() { return VGPR != 0; } in hasReg() 98 static bool isChainScratchRegister(Register VGPR); |
|
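The SIRegisterInfo.h hits show the SpilledReg pair used to record where one 32-bit SGPR value was parked: a VGPR plus a lane index within it. Below is a simplified, self-contained model with plain unsigned ints standing in for llvm::Register; it mirrors the members visible above rather than reproducing LLVM's header.

#include <cassert>

// Stand-in for llvm::Register: 0 means "no register".
using Reg = unsigned;

// An SGPR's 32-bit value lives in one lane of a VGPR, so the spill
// location is a (VGPR, lane) pair, as in SIRegisterInfo.h above.
struct SpilledReg {
  Reg VGPR = 0;
  int Lane = -1;

  SpilledReg() = default;
  SpilledReg(Reg R, int L) : VGPR(R), Lane(L) {}

  bool hasReg() const { return VGPR != 0; }
  bool hasLane() const { return Lane != -1; }
};

int main() {
  SpilledReg S(/*VGPR=*/256, /*Lane=*/3); // hypothetical register id
  assert(S.hasReg() && S.hasLane());
  return 0;
}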
| AMDGPURegisterBanks.td | 13 def VGPRRegBank : RegisterBank<"VGPR", … |
|
| SIMachineFunctionInfo.cpp | 108 MayNeedAGPRs = false; // We will select all MAI with VGPR operands. in SIMachineFunctionInfo() 168 // VGPR available at all times. For now, reserve highest available VGPR. After in SIMachineFunctionInfo() 169 // RA, shift it to the lowest available unused VGPR if one exists. in SIMachineFunctionInfo() 281 void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR, in allocateWWMSpill() 284 if (isEntryFunction() || WWMSpills.count(VGPR)) in allocateWWMSpill() 292 if (isChainFunction() && SIRegisterInfo::isChainScratchRegister(VGPR)) in splitWWMSpillRegisters() 296 VGPR, MF.getFrameInfo().CreateSpillStackObject(Size, Alignment))); in splitWWMSpillRegisters() 335 // Update various tables with the new VGPR. in shiftSpillPhysVGPRsToLowestRange() 374 // case, will be shifted back to the lowest range after VGPR allocation in allocatePhysicalVGPRForSGPRSpills() 273 allocateWWMSpill(MachineFunction &MF, Register VGPR, uint64_t Size, Align Alignment) allocateWWMSpill() argument |
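The allocateWWMSpill lines above record one scratch stack object per WWM-clobbered VGPR, bailing out for entry functions and for registers that already have a slot. A minimal standalone model of that bookkeeping, with plain ints for registers and frame indices and a counter standing in for CreateSpillStackObject (names here are illustrative, not LLVM's):

#include <map>

using Reg = unsigned;
using FrameIndex = int;

struct WWMSpillTable {
  bool IsEntryFunction = false;
  std::map<Reg, FrameIndex> WWMSpills; // VGPR -> spill stack slot
  FrameIndex NextFI = 0;               // stands in for CreateSpillStackObject()

  // Modeled after the allocateWWMSpill() hits: entry functions return early,
  // and each VGPR gets at most one spill slot.
  void allocateWWMSpill(Reg VGPR) {
    if (IsEntryFunction || WWMSpills.count(VGPR))
      return;
    WWMSpills.insert({VGPR, NextFI++});
  }
};

int main() {
  WWMSpillTable T;
  T.allocateWWMSpill(40);
  T.allocateWWMSpill(40);   // duplicate request: no second slot is created
  return T.WWMSpills.size() == 1 ? 0 : 1;
}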
| SIFrameLowering.cpp | 101 // SGPR, so we're forced to take another VGPR to use for the spill. in getVGPRSpillLaneOrTempRegister() 108 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane in getVGPRSpillLaneOrTempRegister() 279 Spill[I].VGPR) in saveToVGPRLane() 282 .addReg(Spill[I].VGPR, RegState::Undef); in saveToVGPRLane() 325 .addReg(Spill[I].VGPR) in restoreFromVGPRLane() 944 Register VGPR = Reg.first; in emitCSRSpillStores() 947 VGPR, FI, FrameReg); in emitCSRSpillStores() 1047 Register VGPR = Reg.first; in emitCSRSpillRestores() 1050 VGPR, FI, FrameReg); in emitCSRSpillRestores() 1135 // memory or to a VGPR lane in emitPrologue() 938 Register VGPR = Reg.first; emitCSRSpillStores() local 1041 Register VGPR = Reg.first; emitCSRSpillRestores() local |
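The saveToVGPRLane/restoreFromVGPRLane hits save a callee-saved SGPR by writing its value into a chosen lane of a spill VGPR (v_writelane_b32) and read it back in the epilogue (v_readlane_b32). A toy data model of that lane round trip, assuming a wave64 VGPR and making no attempt to model the MIR actually emitted:

#include <array>
#include <cassert>
#include <cstdint>

// One VGPR as seen by SGPR spilling: 64 independent 32-bit lanes (wave64).
struct VGPRModel {
  std::array<uint32_t, 64> Lanes{};

  void writelane(uint32_t SGPRValue, unsigned Lane) { Lanes.at(Lane) = SGPRValue; } // v_writelane_b32
  uint32_t readlane(unsigned Lane) const { return Lanes.at(Lane); }                 // v_readlane_b32
};

int main() {
  VGPRModel SpillVGPR;
  uint32_t SavedSGPR = 0xDEADBEEF;             // pretend this is a CSR SGPR value
  SpillVGPR.writelane(SavedSGPR, /*Lane=*/0);  // prologue: save
  assert(SpillVGPR.readlane(0) == SavedSGPR);  // epilogue: restore
  return 0;
}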
| SIInstrInfo.td | 100 SDTCisVT<2, i32>, // vindex(VGPR) 101 SDTCisVT<3, i32>, // voffset(VGPR) 118 SDTCisVT<2, i32>, // vindex(VGPR) 119 SDTCisVT<3, i32>, // voffset(VGPR) 136 SDTCisVT<2, i32>, // vindex(VGPR) 137 SDTCisVT<3, i32>, // voffset(VGPR) 174 SDTCisVT<2, i32>, // vindex(VGPR) 175 SDTCisVT<3, i32>, // voffset(VGPR) 200 SDTCisVT<3, i32>, // vindex(VGPR) 201 SDTCisVT<4, i32>, // voffset(VGPR) … |
| SISchedule.td | 305 // Add 1 stall cycle for VGPR read. 338 // Add 1 stall cycle for VGPR read. |
|
| AMDGPUGenRegisterBankInfo.def | 70 {0, 1, VGPRRegBank}, // VGPR begin |
|
| SIPeepholeSDWA.cpp | 1226 Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); in runOnMachineFunction() 1228 TII->get(AMDGPU::V_MOV_B32_e32), VGPR); in runOnMachineFunction() 1234 Op.ChangeToRegister(VGPR, false); in runOnMachineFunction() 1176 Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); legalizeScalarOperands() local |
|
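The SIPeepholeSDWA hits above legalize an operand that is not a VGPR by creating a fresh virtual VGPR, emitting a V_MOV_B32 into it, and rewriting the operand to use the new register. A conceptual sketch of that rewrite; the Operand type, the string "MIR", and the register naming are all illustrative, not LLVM's API:

#include <string>
#include <vector>

struct Operand { bool IsVGPR; std::string Text; };

static unsigned NextVirtVGPR = 0;
static std::vector<std::string> EmittedMIR;

// If the operand is an SGPR or an immediate, materialize it into a fresh
// virtual VGPR with a v_mov_b32 and point the operand at that VGPR.
void legalizeToVGPR(Operand &Op) {
  if (Op.IsVGPR)
    return;                                   // already legal
  std::string NewReg = "%vgpr" + std::to_string(NextVirtVGPR++);
  EmittedMIR.push_back(NewReg + " = V_MOV_B32_e32 " + Op.Text);
  Op = {true, NewReg};                        // operand now reads the new VGPR
}

int main() {
  Operand Imm{false, "42"};
  legalizeToVGPR(Imm);
  return Imm.IsVGPR ? 0 : 1;
}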
| SIRegisterInfo.cpp | 51 // This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits 54 // When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR 56 // For this, a VGPR is required where the needed lanes can be clobbered. The 57 // RegScavenger can provide a VGPR where currently active lanes can be 61 // - Try to scavenge VGPR 88 // The SGPRs are written into this VGPR, which is then written to scratch 152 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is 157 // buffer_store_dword v1 ; Write scavenged VGPR to … 442 isChainScratchRegister(Register VGPR) isChainScratchRegister() argument |
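The SIRegisterInfo.cpp comments describe spilling SGPRs to memory in two steps: each 32-bit SGPR is first written into a lane of a scavenged (or deliberately clobbered) VGPR, then that whole VGPR is stored to a scratch slot; the reload reverses the sequence. A plain data-model sketch of that flow, assuming wave64 and standing well clear of real codegen:

#include <array>
#include <cstdint>
#include <map>

using VGPR = std::array<uint32_t, 64>;   // 64 lanes of 32 bits (wave64)

std::map<int, VGPR> Scratch;             // frame index -> saved VGPR contents

void spillSGPRs(const uint32_t *SGPRs, unsigned N, int FI, VGPR &Temp) {
  for (unsigned I = 0; I < N; ++I)
    Temp[I] = SGPRs[I];                  // v_writelane_b32 into the temp VGPR
  Scratch[FI] = Temp;                    // scratch/buffer store of the VGPR
}

void reloadSGPRs(uint32_t *SGPRs, unsigned N, int FI, VGPR &Temp) {
  Temp = Scratch.at(FI);                 // scratch/buffer load into the VGPR
  for (unsigned I = 0; I < N; ++I)
    SGPRs[I] = Temp[I];                  // v_readlane_b32 back into SGPRs
}

int main() {
  VGPR Temp{};
  uint32_t In[4] = {1, 2, 3, 4}, Out[4] = {};
  spillSGPRs(In, 4, /*FI=*/0, Temp);
  reloadSGPRs(Out, 4, /*FI=*/0, Temp);
  return Out[3] == 4 ? 0 : 1;
}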
| VOP1Instructions.td | 367 // Restrict src0 to be VGPR 733 // Restrict src0 to be VGPR 1343 // Copy of v_mov_b32 with $vdst as a use operand for use with VGPR … 1351 // Copy of v_mov_b32 for use with VGPR indexing mode. An implicit use of the … |
|
| AMDGPU.td | 277 "Scratch instructions with a VGPR offset and a negative immediate offset that is not a multiple of 4 read wrong memory on GFX10" 438 "Has VGPR mode register indexing" 935 "Has single-use VGPR hint instructions" 959 "VMEM instructions of the same type write VGPR results in order" |
|
| SIMachineFunctionInfo.h | 347 // 2. Spill to a VGPR lane. 348 // 3. Spill to memory via a scratch VGPR. 497 // To track virtual VGPR + lane index for each subregister of the SGPR spilled 501 // To track physical VGPR + lane index for CSR SGPR spills and special SGPRs 515 // the VGPR and its stack slot index. 538 // AGPRs used for VGPR spills. 648 // Check if \p FI is allocated for any SGPR spill to a VGPR lane during PEI. 691 void allocateWWMSpill(MachineFunction &MF, Register VGPR, uint64_t Size = 4, 797 // Add special VGPR inputs |
|
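The SIMachineFunctionInfo.h hits track, for each 32-bit piece of a spilled SGPR, which VGPR and which lane received it, taking a fresh spill VGPR once the current one has no free lanes. A rough sketch of that lane bookkeeping; the allocator class, its lane count, and the register ids are assumptions made for illustration, not the pass's real data structures:

#include <vector>

struct SpillSlot { unsigned VGPR; unsigned Lane; };

class SGPRSpillLaneAllocator {
  unsigned LaneCount;          // 64 for wave64, 32 for wave32
  unsigned CurVGPR = 0;        // 0 = no spill VGPR reserved yet
  unsigned NextLane = 0;
  unsigned NextVGPRId = 100;   // hypothetical pool of spill VGPRs

public:
  explicit SGPRSpillLaneAllocator(unsigned Lanes) : LaneCount(Lanes) {}

  SpillSlot allocate() {
    if (CurVGPR == 0 || NextLane == LaneCount) {
      CurVGPR = NextVGPRId++;  // current VGPR is full: reserve another one
      NextLane = 0;
    }
    return {CurVGPR, NextLane++};
  }
};

int main() {
  SGPRSpillLaneAllocator Alloc(/*Lanes=*/64);
  std::vector<SpillSlot> Slots;
  for (int I = 0; I < 70; ++I)
    Slots.push_back(Alloc.allocate());           // 70 pieces spill into two VGPRs
  return Slots[64].VGPR != Slots[63].VGPR ? 0 : 1;
}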
| SIInstructions.td | 854 // These variants of V_INDIRECT_REG_READ/WRITE use VGPR indexing. By using these 856 // that switch the VGPR indexing mode. Spills to accvgprs could be affected by 959 // VGPR or AGPR spill instructions. In case of AGPR spilling a temp register 960 // needs to be used and an extra instruction to move between VGPR and AGPR. 3396 // Avoid pointlessly materializing a constant in VGPR. |
|
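The V_INDIRECT_REG_READ/WRITE variants mentioned above let a run-time index select which register of a VGPR tuple is accessed. A conceptual model only: the tuple is reduced to an array of 32-bit values, and nothing about the indexing-mode switches (S_SET_GPR_IDX_ON/OFF) or M0 is represented:

#include <array>
#include <cstdint>

template <unsigned N> struct VGPRTuple {
  std::array<uint32_t, N> Regs{};

  uint32_t indirectRead(unsigned Idx) const { return Regs.at(Idx); }   // indexed read
  void indirectWrite(unsigned Idx, uint32_t V) { Regs.at(Idx) = V; }   // indexed write
};

int main() {
  VGPRTuple<8> T;             // e.g. a tuple of 8 consecutive VGPRs
  T.indirectWrite(5, 0x1234);
  return T.indirectRead(5) == 0x1234 ? 0 : 1;
}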
| VOPInstructions.td | 443 bits<10> vdst; // VGPR or AGPR, but not SGPR. vdst{8} is not encoded in the instruction. |
|
| VOP3Instructions.td | 490 // blocking folding SGPR->VGPR copies later. 780 // GISel-specific pattern that avoids creating a SGPR->VGPR copy if 781 // $src2 is a VGPR. |
|
| FLATInstructions.td | 60 bits<1> has_sve = 0; // Scratch VGPR Enable |
|
| /freebsd-src/contrib/llvm-project/llvm/include/llvm/IR/ |
| IntrinsicsAMDGPU.td | 559 // The pointer argument is assumed to be dynamically uniform if a VGPR. 855 P_.RetTypes, // vdata(VGPR) -- for load/atomic-with-return 857 !foreach(arg, P_.DataArgs, arg.Type), // vdata(VGPR) -- for store/atomic 859 P_.AddrTypes, // vaddr(VGPR) 1127 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) 1144 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) 1157 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) 1175 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) 1188 llvm_i32_ty, // vindex(VGPR) 1189 llvm_i32_ty, // offset(VGPR/imm, … |
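The buffer intrinsics above take a VGPR record index (vindex), a VGPR byte offset (voffset), and an SGPR offset (soffset) alongside the SGPR resource descriptor. As a rough model of how those operands combine into an address: the real hardware rules (record swizzling, the index/offset enable bits, the exact out-of-bounds behaviour) come from the descriptor and differ by generation, so the descriptor fields and the bounds check below are simplifying assumptions, not the architected formula:

#include <cstdint>
#include <optional>

struct BufferRsrc {
  uint64_t Base;        // base address from the SGPR resource descriptor
  uint32_t Stride;      // bytes per record
  uint32_t NumRecords;  // used for bounds checking
};

// Structured-buffer style addressing: one record per vindex, byte offsets added on top.
std::optional<uint64_t> structBufferAddress(const BufferRsrc &R, uint32_t VIndex,
                                            uint32_t VOffset, uint32_t SOffset) {
  if (VIndex >= R.NumRecords)
    return std::nullopt;                              // out of bounds: access dropped
  return R.Base + uint64_t(VIndex) * R.Stride + VOffset + SOffset;
}

int main() {
  BufferRsrc R{0x1000, /*Stride=*/16, /*NumRecords=*/8};
  return structBufferAddress(R, 2, 4, 0).value_or(0) == 0x1000 + 36 ? 0 : 1;
}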