1 //===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass eliminates LDS uses from non-kernel functions. 10 // 11 // The strategy is to create a new struct with a field for each LDS variable 12 // and allocate that struct at the same address for every kernel. Uses of the 13 // original LDS variables are then replaced with compile time offsets from that 14 // known address. AMDGPUMachineFunction allocates the LDS global. 15 // 16 // Local variables with constant annotation or non-undef initializer are passed 17 // through unchanged for simplification or error diagnostics in later passes. 18 // 19 // To reduce the memory overhead variables that are only used by kernels are 20 // excluded from this transform. The analysis to determine whether a variable 21 // is only used by a kernel is cheap and conservative so this may allocate 22 // a variable in every kernel when it was not strictly necessary to do so. 23 // 24 // A possible future refinement is to specialise the structure per-kernel, so 25 // that fields can be elided based on more expensive analysis. 26 // 27 //===----------------------------------------------------------------------===// 28 29 #include "AMDGPU.h" 30 #include "Utils/AMDGPUBaseInfo.h" 31 #include "Utils/AMDGPUMemoryUtils.h" 32 #include "llvm/ADT/BitVector.h" 33 #include "llvm/ADT/DenseMap.h" 34 #include "llvm/ADT/STLExtras.h" 35 #include "llvm/Analysis/CallGraph.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DerivedTypes.h" 38 #include "llvm/IR/IRBuilder.h" 39 #include "llvm/IR/InlineAsm.h" 40 #include "llvm/IR/Instructions.h" 41 #include "llvm/IR/MDBuilder.h" 42 #include "llvm/InitializePasses.h" 43 #include "llvm/Pass.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Support/Debug.h" 46 #include "llvm/Support/OptimizedStructLayout.h" 47 #include "llvm/Transforms/Utils/ModuleUtils.h" 48 #include <tuple> 49 #include <vector> 50 51 #define DEBUG_TYPE "amdgpu-lower-module-lds" 52 53 using namespace llvm; 54 55 static cl::opt<bool> SuperAlignLDSGlobals( 56 "amdgpu-super-align-lds-globals", 57 cl::desc("Increase alignment of LDS if it is not on align boundary"), 58 cl::init(true), cl::Hidden); 59 60 namespace { 61 class AMDGPULowerModuleLDS : public ModulePass { 62 63 static void removeFromUsedList(Module &M, StringRef Name, 64 SmallPtrSetImpl<Constant *> &ToRemove) { 65 GlobalVariable *GV = M.getNamedGlobal(Name); 66 if (!GV || ToRemove.empty()) { 67 return; 68 } 69 70 SmallVector<Constant *, 16> Init; 71 auto *CA = cast<ConstantArray>(GV->getInitializer()); 72 for (auto &Op : CA->operands()) { 73 // ModuleUtils::appendToUsed only inserts Constants 74 Constant *C = cast<Constant>(Op); 75 if (!ToRemove.contains(C->stripPointerCasts())) { 76 Init.push_back(C); 77 } 78 } 79 80 if (Init.size() == CA->getNumOperands()) { 81 return; // none to remove 82 } 83 84 GV->eraseFromParent(); 85 86 for (Constant *C : ToRemove) { 87 C->removeDeadConstantUsers(); 88 } 89 90 if (!Init.empty()) { 91 ArrayType *ATy = 92 ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size()); 93 GV = 94 new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage, 95 ConstantArray::get(ATy, Init), Name); 96 GV->setSection("llvm.metadata"); 97 } 98 } 99 100 static void 101 removeFromUsedLists(Module &M, 102 const std::vector<GlobalVariable *> &LocalVars) { 103 SmallPtrSet<Constant *, 32> LocalVarsSet; 104 for (GlobalVariable *LocalVar : LocalVars) 105 if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts())) 106 LocalVarsSet.insert(C); 107 removeFromUsedList(M, "llvm.used", LocalVarsSet); 108 removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet); 109 } 110 111 static void markUsedByKernel(IRBuilder<> &Builder, Function *Func, 112 GlobalVariable *SGV) { 113 // The llvm.amdgcn.module.lds instance is implicitly used by all kernels 114 // that might call a function which accesses a field within it. This is 115 // presently approximated to 'all kernels' if there are any such functions 116 // in the module. This implicit use is redefined as an explicit use here so 117 // that later passes, specifically PromoteAlloca, account for the required 118 // memory without any knowledge of this transform. 119 120 // An operand bundle on llvm.donothing works because the call instruction 121 // survives until after the last pass that needs to account for LDS. It is 122 // better than inline asm as the latter survives until the end of codegen. A 123 // totally robust solution would be a function with the same semantics as 124 // llvm.donothing that takes a pointer to the instance and is lowered to a 125 // no-op after LDS is allocated, but that is not presently necessary. 126 127 LLVMContext &Ctx = Func->getContext(); 128 129 Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI()); 130 131 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {}); 132 133 Function *Decl = 134 Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {}); 135 136 Value *UseInstance[1] = {Builder.CreateInBoundsGEP( 137 SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))}; 138 139 Builder.CreateCall(FTy, Decl, {}, 140 {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)}, 141 ""); 142 } 143 144 public: 145 static char ID; 146 147 AMDGPULowerModuleLDS() : ModulePass(ID) { 148 initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry()); 149 } 150 151 bool runOnModule(Module &M) override { 152 LLVMContext &Ctx = M.getContext(); 153 CallGraph CG = CallGraph(M); 154 bool Changed = superAlignLDSGlobals(M); 155 156 std::vector<GlobalVariable *> ModuleScopeVariables = 157 AMDGPU::findVariablesToLower(M, nullptr); 158 if (!ModuleScopeVariables.empty()) { 159 GlobalVariable *SGV = 160 processUsedLDS(CG, M, ModuleScopeVariables, nullptr); 161 162 // This ensures the variable is allocated when called functions access it. 163 // It also lets other passes, specifically PromoteAlloca, accurately 164 // calculate how much LDS will be used by the kernel after lowering. 165 166 IRBuilder<> Builder(Ctx); 167 for (Function &Func : M.functions()) { 168 if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) { 169 const CallGraphNode *N = CG[&Func]; 170 const bool CalleesRequireModuleLDS = N->size() > 0; 171 172 if (CalleesRequireModuleLDS) { 173 // If a function this kernel might call requires module LDS, 174 // annotate the kernel to let later passes know it will allocate 175 // this structure, even if not apparent from the IR. 176 markUsedByKernel(Builder, &Func, SGV); 177 } else { 178 // However if we are certain this kernel cannot call a function that 179 // requires module LDS, annotate the kernel so the backend can elide 180 // the allocation without repeating callgraph walks. 181 Func.addFnAttr("amdgpu-elide-module-lds"); 182 } 183 } 184 } 185 186 Changed = true; 187 } 188 189 for (Function &F : M.functions()) { 190 if (F.isDeclaration()) 191 continue; 192 193 // Only lower compute kernels' LDS. 194 if (!AMDGPU::isKernel(F.getCallingConv())) 195 continue; 196 197 std::vector<GlobalVariable *> KernelUsedVariables = 198 AMDGPU::findVariablesToLower(M, &F); 199 if (!KernelUsedVariables.empty()) { 200 processUsedLDS(CG, M, KernelUsedVariables, &F); 201 Changed = true; 202 } 203 } 204 205 return Changed; 206 } 207 208 private: 209 // Increase the alignment of LDS globals if necessary to maximise the chance 210 // that we can use aligned LDS instructions to access them. 211 static bool superAlignLDSGlobals(Module &M) { 212 const DataLayout &DL = M.getDataLayout(); 213 bool Changed = false; 214 if (!SuperAlignLDSGlobals) { 215 return Changed; 216 } 217 218 for (auto &GV : M.globals()) { 219 if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) { 220 // Only changing alignment of LDS variables 221 continue; 222 } 223 if (!GV.hasInitializer()) { 224 // cuda/hip extern __shared__ variable, leave alignment alone 225 continue; 226 } 227 228 Align Alignment = AMDGPU::getAlign(DL, &GV); 229 TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType()); 230 231 if (GVSize > 8) { 232 // We might want to use a b96 or b128 load/store 233 Alignment = std::max(Alignment, Align(16)); 234 } else if (GVSize > 4) { 235 // We might want to use a b64 load/store 236 Alignment = std::max(Alignment, Align(8)); 237 } else if (GVSize > 2) { 238 // We might want to use a b32 load/store 239 Alignment = std::max(Alignment, Align(4)); 240 } else if (GVSize > 1) { 241 // We might want to use a b16 load/store 242 Alignment = std::max(Alignment, Align(2)); 243 } 244 245 if (Alignment != AMDGPU::getAlign(DL, &GV)) { 246 Changed = true; 247 GV.setAlignment(Alignment); 248 } 249 } 250 return Changed; 251 } 252 253 std::tuple<GlobalVariable *, DenseMap<GlobalVariable *, Constant *>> 254 createLDSVariableReplacement( 255 Module &M, std::string VarName, 256 std::vector<GlobalVariable *> const &LDSVarsToTransform) { 257 // Create a struct instance containing LDSVarsToTransform and map from those 258 // variables to ConstantExprGEP 259 // Variables may be introduced to meet alignment requirements. No aliasing 260 // metadata is useful for these as they have no uses. Erased before return. 261 262 LLVMContext &Ctx = M.getContext(); 263 const DataLayout &DL = M.getDataLayout(); 264 assert(!LDSVarsToTransform.empty()); 265 266 SmallVector<OptimizedStructLayoutField, 8> LayoutFields; 267 LayoutFields.reserve(LDSVarsToTransform.size()); 268 for (GlobalVariable *GV : LDSVarsToTransform) { 269 OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()), 270 AMDGPU::getAlign(DL, GV)); 271 LayoutFields.emplace_back(F); 272 } 273 274 performOptimizedStructLayout(LayoutFields); 275 276 std::vector<GlobalVariable *> LocalVars; 277 BitVector IsPaddingField; 278 LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large 279 IsPaddingField.reserve(LDSVarsToTransform.size()); 280 { 281 uint64_t CurrentOffset = 0; 282 for (size_t I = 0; I < LayoutFields.size(); I++) { 283 GlobalVariable *FGV = static_cast<GlobalVariable *>( 284 const_cast<void *>(LayoutFields[I].Id)); 285 Align DataAlign = LayoutFields[I].Alignment; 286 287 uint64_t DataAlignV = DataAlign.value(); 288 if (uint64_t Rem = CurrentOffset % DataAlignV) { 289 uint64_t Padding = DataAlignV - Rem; 290 291 // Append an array of padding bytes to meet alignment requested 292 // Note (o + (a - (o % a)) ) % a == 0 293 // (offset + Padding ) % align == 0 294 295 Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding); 296 LocalVars.push_back(new GlobalVariable( 297 M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy), 298 "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, 299 false)); 300 IsPaddingField.push_back(true); 301 CurrentOffset += Padding; 302 } 303 304 LocalVars.push_back(FGV); 305 IsPaddingField.push_back(false); 306 CurrentOffset += LayoutFields[I].Size; 307 } 308 } 309 310 std::vector<Type *> LocalVarTypes; 311 LocalVarTypes.reserve(LocalVars.size()); 312 std::transform( 313 LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes), 314 [](const GlobalVariable *V) -> Type * { return V->getValueType(); }); 315 316 StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t"); 317 318 Align StructAlign = 319 AMDGPU::getAlign(DL, LocalVars[0]); 320 321 GlobalVariable *SGV = new GlobalVariable( 322 M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy), 323 VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, 324 false); 325 SGV->setAlignment(StructAlign); 326 327 DenseMap<GlobalVariable *, Constant *> Map; 328 Type *I32 = Type::getInt32Ty(Ctx); 329 for (size_t I = 0; I < LocalVars.size(); I++) { 330 GlobalVariable *GV = LocalVars[I]; 331 Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)}; 332 Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true); 333 if (IsPaddingField[I]) { 334 assert(GV->use_empty()); 335 GV->eraseFromParent(); 336 } else { 337 Map[GV] = GEP; 338 } 339 } 340 assert(Map.size() == LDSVarsToTransform.size()); 341 return {SGV, std::move(Map)}; 342 } 343 344 GlobalVariable * 345 processUsedLDS(CallGraph const &CG, Module &M, 346 std::vector<GlobalVariable *> const &LDSVarsToTransform, 347 Function *F) { 348 LLVMContext &Ctx = M.getContext(); 349 const DataLayout &DL = M.getDataLayout(); 350 351 std::string VarName( 352 F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str() 353 : "llvm.amdgcn.module.lds"); 354 355 GlobalVariable *SGV; 356 DenseMap<GlobalVariable *, Constant *> LDSVarToConstantGEP; 357 std::tie(SGV, LDSVarToConstantGEP) = 358 createLDSVariableReplacement(M, VarName, LDSVarsToTransform); 359 360 if (!F) { 361 appendToCompilerUsed( 362 M, {static_cast<GlobalValue *>( 363 ConstantExpr::getPointerBitCastOrAddrSpaceCast( 364 cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))}); 365 } 366 367 // The verifier rejects used lists containing an inttoptr of a constant 368 // so remove the variables from these lists before replaceAllUsesWith 369 removeFromUsedLists(M, LDSVarsToTransform); 370 371 // Create alias.scope and their lists. Each field in the new structure 372 // does not alias with all other fields. 373 SmallVector<MDNode *> AliasScopes; 374 SmallVector<Metadata *> NoAliasList; 375 const size_t NumberVars = LDSVarsToTransform.size(); 376 if (NumberVars > 1) { 377 MDBuilder MDB(Ctx); 378 AliasScopes.reserve(NumberVars); 379 MDNode *Domain = MDB.createAnonymousAliasScopeDomain(); 380 for (size_t I = 0; I < NumberVars; I++) { 381 MDNode *Scope = MDB.createAnonymousAliasScope(Domain); 382 AliasScopes.push_back(Scope); 383 } 384 NoAliasList.append(&AliasScopes[1], AliasScopes.end()); 385 } 386 387 // Replace uses of ith variable with a constantexpr to the corresponding 388 // field of the instance that will be allocated by AMDGPUMachineFunction 389 for (size_t I = 0; I < NumberVars; I++) { 390 GlobalVariable *GV = LDSVarsToTransform[I]; 391 Constant *GEP = LDSVarToConstantGEP[GV]; 392 393 if (F) { 394 // Replace all constant uses with instructions if they belong to the 395 // current kernel. 396 for (User *U : make_early_inc_range(GV->users())) { 397 if (ConstantExpr *C = dyn_cast<ConstantExpr>(U)) 398 AMDGPU::replaceConstantUsesInFunction(C, F); 399 } 400 401 GV->removeDeadConstantUsers(); 402 403 GV->replaceUsesWithIf(GEP, [F](Use &U) { 404 Instruction *I = dyn_cast<Instruction>(U.getUser()); 405 return I && I->getFunction() == F; 406 }); 407 } else { 408 GV->replaceAllUsesWith(GEP); 409 } 410 if (GV->use_empty()) { 411 GV->eraseFromParent(); 412 } 413 414 APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0); 415 GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff); 416 uint64_t Offset = APOff.getZExtValue(); 417 418 Align A = commonAlignment(SGV->getAlign().valueOrOne(), Offset); 419 420 if (I) 421 NoAliasList[I - 1] = AliasScopes[I - 1]; 422 MDNode *NoAlias = 423 NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList); 424 MDNode *AliasScope = 425 AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]}); 426 427 refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias); 428 } 429 430 return SGV; 431 } 432 433 void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL, 434 MDNode *AliasScope, MDNode *NoAlias, 435 unsigned MaxDepth = 5) { 436 if (!MaxDepth || (A == 1 && !AliasScope)) 437 return; 438 439 for (User *U : Ptr->users()) { 440 if (auto *I = dyn_cast<Instruction>(U)) { 441 if (AliasScope && I->mayReadOrWriteMemory()) { 442 MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope); 443 AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope) 444 : AliasScope); 445 I->setMetadata(LLVMContext::MD_alias_scope, AS); 446 447 MDNode *NA = I->getMetadata(LLVMContext::MD_noalias); 448 NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias); 449 I->setMetadata(LLVMContext::MD_noalias, NA); 450 } 451 } 452 453 if (auto *LI = dyn_cast<LoadInst>(U)) { 454 LI->setAlignment(std::max(A, LI->getAlign())); 455 continue; 456 } 457 if (auto *SI = dyn_cast<StoreInst>(U)) { 458 if (SI->getPointerOperand() == Ptr) 459 SI->setAlignment(std::max(A, SI->getAlign())); 460 continue; 461 } 462 if (auto *AI = dyn_cast<AtomicRMWInst>(U)) { 463 // None of atomicrmw operations can work on pointers, but let's 464 // check it anyway in case it will or we will process ConstantExpr. 465 if (AI->getPointerOperand() == Ptr) 466 AI->setAlignment(std::max(A, AI->getAlign())); 467 continue; 468 } 469 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) { 470 if (AI->getPointerOperand() == Ptr) 471 AI->setAlignment(std::max(A, AI->getAlign())); 472 continue; 473 } 474 if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) { 475 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType()); 476 APInt Off(BitWidth, 0); 477 if (GEP->getPointerOperand() == Ptr) { 478 Align GA; 479 if (GEP->accumulateConstantOffset(DL, Off)) 480 GA = commonAlignment(A, Off.getLimitedValue()); 481 refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias, 482 MaxDepth - 1); 483 } 484 continue; 485 } 486 if (auto *I = dyn_cast<Instruction>(U)) { 487 if (I->getOpcode() == Instruction::BitCast || 488 I->getOpcode() == Instruction::AddrSpaceCast) 489 refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1); 490 } 491 } 492 } 493 }; 494 495 } // namespace 496 char AMDGPULowerModuleLDS::ID = 0; 497 498 char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID; 499 500 INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE, 501 "Lower uses of LDS variables from non-kernel functions", false, 502 false) 503 504 ModulePass *llvm::createAMDGPULowerModuleLDSPass() { 505 return new AMDGPULowerModuleLDS(); 506 } 507 508 PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M, 509 ModuleAnalysisManager &) { 510 return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none() 511 : PreservedAnalyses::all(); 512 } 513