//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-argument-reg-usage-info"

INITIALIZE_PASS(AMDGPUArgumentUsageInfo, DEBUG_TYPE,
                "Argument Register Usage Information Storage", false, true)

// Print a single argument descriptor: either the register or the stack
// offset it was assigned, followed by the bit mask (in hex) when only a
// sub-range of bits in that location holds the value.
void ArgDescriptor::print(raw_ostream &OS,
                          const TargetRegisterInfo *TRI) const {
  if (!isSet()) {
    OS << "<not set>\n";
    return;
  }

  if (isRegister())
    OS << "Reg " << printReg(getRegister(), TRI);
  else
    OS << "Stack offset " << getStackOffset();

  if (isMasked()) {
    OS << " & ";
    // Emit the mask as 0x... for readability.
    llvm::write_hex(OS, Mask, llvm::HexPrintStyle::PrefixLower);
  }

  OS << '\n';
}

char AMDGPUArgumentUsageInfo::ID = 0;

// Value-initialized info (no descriptors set); per its name, used for
// external functions whose argument usage was never recorded.
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::ExternFunctionInfo{};

// Hardcoded registers from fixed function ABI
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::FixedABIFunctionInfo
    = AMDGPUFunctionArgInfo::fixedABILayout();

bool AMDGPUArgumentUsageInfo::doInitialization(Module &M) {
  return false;
}

// Drop all per-function argument info once the module has been processed.
bool AMDGPUArgumentUsageInfo::doFinalization(Module &M) {
  ArgInfoMap.clear();
  return false;
}

// TODO: Print preload kernargs?
63 void AMDGPUArgumentUsageInfo::print(raw_ostream &OS, const Module *M) const { 64 for (const auto &FI : ArgInfoMap) { 65 OS << "Arguments for " << FI.first->getName() << '\n' 66 << " PrivateSegmentBuffer: " << FI.second.PrivateSegmentBuffer 67 << " DispatchPtr: " << FI.second.DispatchPtr 68 << " QueuePtr: " << FI.second.QueuePtr 69 << " KernargSegmentPtr: " << FI.second.KernargSegmentPtr 70 << " DispatchID: " << FI.second.DispatchID 71 << " FlatScratchInit: " << FI.second.FlatScratchInit 72 << " PrivateSegmentSize: " << FI.second.PrivateSegmentSize 73 << " WorkGroupIDX: " << FI.second.WorkGroupIDX 74 << " WorkGroupIDY: " << FI.second.WorkGroupIDY 75 << " WorkGroupIDZ: " << FI.second.WorkGroupIDZ 76 << " WorkGroupInfo: " << FI.second.WorkGroupInfo 77 << " LDSKernelId: " << FI.second.LDSKernelId 78 << " PrivateSegmentWaveByteOffset: " 79 << FI.second.PrivateSegmentWaveByteOffset 80 << " ImplicitBufferPtr: " << FI.second.ImplicitBufferPtr 81 << " ImplicitArgPtr: " << FI.second.ImplicitArgPtr 82 << " WorkItemIDX " << FI.second.WorkItemIDX 83 << " WorkItemIDY " << FI.second.WorkItemIDY 84 << " WorkItemIDZ " << FI.second.WorkItemIDZ 85 << '\n'; 86 } 87 } 88 89 std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT> 90 AMDGPUFunctionArgInfo::getPreloadedValue( 91 AMDGPUFunctionArgInfo::PreloadedValue Value) const { 92 switch (Value) { 93 case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: { 94 return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr, 95 &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32)); 96 } 97 case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR: 98 return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr, 99 &AMDGPU::SGPR_64RegClass, 100 LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 101 case AMDGPUFunctionArgInfo::WORKGROUP_ID_X: 102 return std::tuple(WorkGroupIDX ? 
&WorkGroupIDX : nullptr, 103 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); 104 case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y: 105 return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr, 106 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); 107 case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z: 108 return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr, 109 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); 110 case AMDGPUFunctionArgInfo::LDS_KERNEL_ID: 111 return std::tuple(LDSKernelId ? &LDSKernelId : nullptr, 112 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); 113 case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET: 114 return std::tuple( 115 PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr, 116 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); 117 case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_SIZE: 118 return {PrivateSegmentSize ? &PrivateSegmentSize : nullptr, 119 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)}; 120 case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR: 121 return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr, 122 &AMDGPU::SGPR_64RegClass, 123 LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 124 case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR: 125 return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr, 126 &AMDGPU::SGPR_64RegClass, 127 LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 128 case AMDGPUFunctionArgInfo::DISPATCH_ID: 129 return std::tuple(DispatchID ? &DispatchID : nullptr, 130 &AMDGPU::SGPR_64RegClass, LLT::scalar(64)); 131 case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT: 132 return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr, 133 &AMDGPU::SGPR_64RegClass, LLT::scalar(64)); 134 case AMDGPUFunctionArgInfo::DISPATCH_PTR: 135 return std::tuple(DispatchPtr ? &DispatchPtr : nullptr, 136 &AMDGPU::SGPR_64RegClass, 137 LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 138 case AMDGPUFunctionArgInfo::QUEUE_PTR: 139 return std::tuple(QueuePtr ? 
&QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass, 140 LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); 141 case AMDGPUFunctionArgInfo::WORKITEM_ID_X: 142 return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr, 143 &AMDGPU::VGPR_32RegClass, LLT::scalar(32)); 144 case AMDGPUFunctionArgInfo::WORKITEM_ID_Y: 145 return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr, 146 &AMDGPU::VGPR_32RegClass, LLT::scalar(32)); 147 case AMDGPUFunctionArgInfo::WORKITEM_ID_Z: 148 return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr, 149 &AMDGPU::VGPR_32RegClass, LLT::scalar(32)); 150 } 151 llvm_unreachable("unexpected preloaded value type"); 152 } 153 154 AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() { 155 AMDGPUFunctionArgInfo AI; 156 AI.PrivateSegmentBuffer 157 = ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3); 158 AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5); 159 AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7); 160 161 // Do not pass kernarg segment pointer, only pass increment version in its 162 // place. 
163 AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9); 164 AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11); 165 166 // Skip FlatScratchInit/PrivateSegmentSize 167 AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12); 168 AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13); 169 AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14); 170 AI.LDSKernelId = ArgDescriptor::createRegister(AMDGPU::SGPR15); 171 172 const unsigned Mask = 0x3ff; 173 AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask); 174 AI.WorkItemIDY = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 10); 175 AI.WorkItemIDZ = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 20); 176 return AI; 177 } 178 179 const AMDGPUFunctionArgInfo & 180 AMDGPUArgumentUsageInfo::lookupFuncArgInfo(const Function &F) const { 181 auto I = ArgInfoMap.find(&F); 182 if (I == ArgInfoMap.end()) 183 return FixedABIFunctionInfo; 184 return I->second; 185 } 186