//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis to determine whether a variable
// is only used by a kernel is cheap and conservative, so this may allocate
// a variable in every kernel even when that was not strictly necessary.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {
class AMDGPULowerModuleLDS : public ModulePass {

  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

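  // A sketch of an entry removeFromUsedList rewrites, as appendToCompilerUsed
  // creates it (the variable @var is hypothetical):
  //   @llvm.compiler.used = appending global [1 x i8*]
  //       [i8* addrspacecast (i32 addrspace(3)* @var to i8*)],
  //       section "llvm.metadata"
  // The stripPointerCasts calls above and below look through that
  // addrspacecast so entries can be matched against the underlying global.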
  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
        LocalVarsSet.insert(C);
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen.
    // A totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);
    Changed |= processUsedLDS(CG, M);

    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;
      Changed |= processUsedLDS(CG, M, &F);
    }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
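  // For example (a sketch of the policy below): an 8 byte variable with
  // natural alignment 4 is raised to align 8 so a single b64 access is
  // possible, and anything larger than 8 bytes is raised to align 16 so
  // b96/b128 accesses are possible.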
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  bool processUsedLDS(CallGraph const &CG, Module &M, Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Find variables to move into new struct instance
    std::vector<GlobalVariable *> FoundLocalVars =
        AMDGPU::findVariablesToLower(M, F);

    if (FoundLocalVars.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(FoundLocalVars.size());
    for (GlobalVariable *GV : FoundLocalVars) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    LocalVars.reserve(FoundLocalVars.size()); // will be at least this large
    {
      // This usually won't need to insert any padding, so the vector
      // allocation could perhaps be avoided.
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet the alignment requested.
          // Note (o + (a - (o % a))) % a == 0
          //      (offset + Padding  ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

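    // A sketch of what is built below, for hypothetical fields double and i32
    // lowered at module scope:
    //   %llvm.amdgcn.module.lds.t = type { double, i32 }
    //   @llvm.amdgcn.module.lds = internal addrspace(3) global
    //       %llvm.amdgcn.module.lds.t undef, align 8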
    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign =
        AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant,
    // so remove the variables from these lists before replaceAllUsesWith.
    removeFromUsedLists(M, LocalVars);

    // Create alias.scope metadata and the corresponding noalias lists. Each
    // field of the new structure does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    if (LocalVars.size() > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(LocalVars.size());
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < LocalVars.size(); I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the ith variable with a constantexpr to the ith field of
    // the instance that will be allocated by AMDGPUMachineFunction.
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty()) {
        GV->eraseFromParent();
      }

      uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
      Align A = commonAlignment(StructAlign, Off);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }

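    // After the loop above, a use such as the following sketch (@var and its
    // field index N are hypothetical):
    //   %v = load i32, i32 addrspace(3)* @var
    // has become
    //   %v = load i32, i32 addrspace(3)* getelementptr inbounds
    //       (%llvm.amdgcn.module.lds.t,
    //        %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds,
    //        i32 0, i32 N)
    // with refined alignment and alias.scope/noalias metadata on the access.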

    // This ensures the variable is allocated when called functions access it.
    // It also lets other passes, specifically PromoteAlloca, accurately
    // calculate how much LDS will be used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      for (Function &Func : M.functions()) {
        if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
          const CallGraphNode *N = CG[&Func];
          const bool CalleesRequireModuleLDS = N->size() > 0;

          if (CalleesRequireModuleLDS) {
            // If a function this kernel might call requires module LDS,
            // annotate the kernel to let later passes know it will allocate
            // this structure, even if not apparent from the IR.
            markUsedByKernel(Builder, &Func, SGV);
          } else {
            // However if we are certain this kernel cannot call a function
            // that requires module LDS, annotate the kernel so the backend
            // can elide the allocation without repeating callgraph walks.
            Func.addFnAttr("amdgpu-elide-module-lds");
          }
        }
      }
    }
    return true;
  }

  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // atomicrmw cannot currently operate on pointer values, so Ptr should
        // only ever be the pointer operand; check anyway in case that changes
        // or we are looking through a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}
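
// A sketch of how to exercise this pass in isolation, assuming an LLVM build
// with the AMDGPU target (the pass name matches DEBUG_TYPE in both the legacy
// and new pass managers):
//   opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds in.ll
//   opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds in.ll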