//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates local data store, LDS, uses from non-kernel functions.
// LDS is contiguous memory allocated per kernel execution.
//
// Background.
//
// The programming model is global variables, or equivalently function local
// static variables, accessible from kernels or other functions. For uses from
// kernels this is straightforward - assign an integer to the kernel for the
// memory required by all the variables combined, allocate them within that.
// For uses from functions there are performance tradeoffs to choose between.
//
// This model means the GPU runtime can specify the amount of memory allocated.
// If this is more than the kernel assumed, the excess can be made available
// using a language specific feature, which IR represents as a variable with
// no initializer. This feature is referred to here as "Dynamic LDS" and is
// lowered slightly differently to the normal case.
//
// Consequences of this GPU feature:
// - memory is limited and exceeding it halts compilation
// - a global accessed by one kernel exists independent of other kernels
// - a global exists independent of simultaneous execution of the same kernel
// - the address of the global may be different from different kernels as they
//   do not alias, which permits only allocating variables they use
// - if the address is allowed to differ, functions need help to find it
//
// Uses from kernels are implemented here by grouping them in a per-kernel
// struct instance. This duplicates the variables, accurately modelling their
// aliasing properties relative to a single global representation. It also
// permits control over alignment via padding.
//
// Uses from functions are more complicated and the primary purpose of this
// IR pass. Several different lowerings are chosen between to meet requirements
// to avoid allocating any LDS where it is not necessary, as that impacts
// occupancy and may fail the compilation, while not imposing overhead on a
// feature whose primary advantage over global memory is performance. The basic
// design goal is to avoid one kernel imposing overhead on another.
//
// Implementation.
//
// LDS variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
// Non-undef initializers are not yet implemented for LDS.
//
// LDS variables that are always allocated at the same address can be found
// by lookup at that address. Otherwise runtime information/cost is required.
//
// The simplest strategy possible is to group all LDS variables in a single
// struct and allocate that struct in every kernel such that the original
// variables are always at the same address. LDS is however a limited resource
// so this strategy is unusable in practice. It is not implemented here.
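//
// As a sketch of the problem being solved (hypothetical IR, not taken from
// any test):
//   @var = internal addrspace(3) global i32 poison
//   define void @f() { ... uses @var ... }
//   define amdgpu_kernel void @k0() { ... calls @f ... }
//   define amdgpu_kernel void @k1() { ... calls @f ... }
// Both @k0 and @k1 must allocate @var within their LDS frames, and @f must be
// able to find its address in whichever frame belongs to the currently
// executing kernel. The strategies below differ in how @f does that.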
//
// Strategy | Precise allocation | Zero runtime cost | General purpose |
//  --------+--------------------+-------------------+-----------------+
//   Module |         No         |        Yes        |       Yes       |
//    Table |        Yes         |        No         |       Yes       |
//   Kernel |        Yes         |        Yes        |       No        |
//   Hybrid |        Yes         |      Partial      |       Yes       |
//
// "Module" spends LDS memory to save cycles. "Table" spends cycles and global
// memory to save LDS. "Kernel" is as fast as kernel allocation but only works
// for variables that are known reachable from a single kernel. "Hybrid" picks
// between all three. When forced to choose between LDS and cycles we minimise
// LDS use.
//
// The "module" lowering implemented here finds LDS variables which are used by
// non-kernel functions and creates a new struct with a field for each of those
// LDS variables. Variables that are only used from kernels are excluded.
//
// The "table" lowering implemented here has three components.
// First, each kernel is assigned a unique integer identifier which is
// available in functions it calls through the intrinsic amdgcn_lds_kernel_id.
// The integer is passed through a specific SGPR, thus works with indirect
// calls.
// Second, each kernel allocates LDS variables independent of other kernels and
// writes the addresses it chose for each variable into an array in consistent
// order. If the kernel does not allocate a given variable, it writes undef to
// the corresponding array location. These arrays are written to a constant
// table in the order matching the kernel unique integer identifier.
// Third, uses from non-kernel functions are replaced with a table lookup using
// the intrinsic function to find the address of the variable.
//
// "Kernel" lowering is only applicable for variables that are unambiguously
// reachable from exactly one kernel. For those cases, accesses to the variable
// can be lowered to ConstantExpr address of a struct instance specific to that
// one kernel. This is zero cost in space and in compute. It will raise a fatal
// error on any variable that might be reachable from multiple kernels and is
// thus most easily used as part of the hybrid lowering strategy.
//
// Hybrid lowering is a mixture of the above. It uses the zero cost kernel
// lowering where it can. It lowers the variable accessed by the greatest
// number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
// without incurring LDS memory overhead are. The remaining ones are lowered
// via table.
//
// Consequences
// - No heuristics or user controlled magic numbers, hybrid is the right choice
// - Kernels that don't use functions (or have had them all inlined) are not
//   affected by any lowering for kernels that do.
// - Kernels that don't make indirect function calls are not affected by those
//   that do.
// - Variables which are used by lots of kernels, e.g.
//   those injected by a language runtime in most kernels, are expected to
//   have no overhead
// - Implementations that instantiate templates per-kernel where those
//   templates use LDS are expected to hit the "Kernel" lowering strategy
// - The runtime properties impose a cost in compiler implementation complexity
//
// Dynamic LDS implementation
// Dynamic LDS is lowered similarly to the "table" strategy above and uses the
// same intrinsic to identify which kernel is at the root of the dynamic call
// graph. This relies on the specified behaviour that all dynamic LDS variables
// alias one another, i.e. are at the same address, with respect to a given
// kernel. Therefore this pass creates new dynamic LDS variables for each
// kernel that allocates any dynamic LDS and builds a table of addresses out of
// those. The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
// The corresponding optimisation for "kernel" lowering where the table lookup
// is elided is not implemented.
//
//
// Implementation notes / limitations
// A single LDS global variable represents an instance per kernel that can
// reach it. This pass essentially specialises those variables per kernel.
// Handling ConstantExpr during the pass complicated this significantly so now
// all ConstantExpr uses of LDS variables are expanded to instructions. This
// may need amending when implementing non-undef initialisers.
//
// Lowering is split between this IR pass and the back end. This pass chooses
// where given variables should be allocated and marks them with metadata,
// MD_absolute_symbol. The backend independently places the variables at those
// same addresses and raises a fatal error if something has gone awry. This
// works in practice because the only pass between this one and the backend
// that changes LDS is PromoteAlloca and the changes it makes do not conflict.
//
// Addresses are written to constant global arrays based on the same metadata.
//
// The backend lowers LDS variables in the order of traversal of the function.
// This is at odds with the deterministic layout required. The workaround is to
// allocate the fixed-address variables immediately upon starting the function
// where they can be placed as intended. This requires a means of mapping from
// the function to the variables that it allocates. For the module scope lds,
// this is via metadata indicating whether the variable is not required. If a
// pass deletes that metadata, a fatal error on disagreement with the absolute
// symbol metadata will occur. For kernel scope and dynamic, this is by _name_
// correspondence between the function and the variable. It requires the
// kernel to have a name (which is only a limitation for tests in practice) and
// for nothing to rename the corresponding symbols. This is a hazard if the
// pass is run multiple times during debugging. Alternative schemes considered
// all involve bespoke metadata.
//
// If the name correspondence can be replaced, multiple distinct kernels that
// have the same memory layout can map to the same kernel id (as the address
// itself is handled by the absolute symbol metadata) and that will allow more
// uses of the "kernel" style faster lowering and reduce the size of the lookup
// tables.
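//
// For a flavour of what the "table" lowering produces, a non-kernel access to
// a table-lowered variable ends up as roughly the following (a sketch with
// two kernels and two variables assumed; see replaceUseWithTableLookup below
// for the real construction):
//   %id   = call i32 @llvm.amdgcn.lds.kernel.id()
//   %slot = getelementptr inbounds [2 x [2 x i32]],
//             ptr addrspace(4) @llvm.amdgcn.lds.offset.table,
//             i32 0, i32 %id, i32 <variable index>
//   %off  = load i32, ptr addrspace(4) %slot
//   %ptr  = inttoptr i32 %off to ptr addrspace(3)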
//
// There is a test that checks this does not fire for a graphics shader. This
// lowering is expected to work for graphics if the isKernel test is changed.
//
// The current markUsedByKernel is sufficient for PromoteAlloca but is elided
// before codegen. Replacing this with an equivalent intrinsic which lasts
// until shortly after the machine function lowering of LDS would help break
// the name mapping. The other part needed is probably to amend PromoteAlloca
// to embed the LDS variables it creates in the same struct created here. That
// avoids the current hazard where a PromoteAlloca LDS variable might be
// allocated before the kernel scope (and thus error on the address check).
// Given a new invariant that no LDS variables exist outside of the structs
// managed here, and an intrinsic that lasts until after the LDS frame
// lowering, it should be possible to drop the name mapping and fold
// equivalent memory layouts.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

#include <vector>

#include <cstdio>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;
using namespace AMDGPU;

namespace {

cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

enum class LoweringKind { module, table, kernel, hybrid };
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));

template <typename T> std::vector<T> sortByName(std::vector<T> &&V) {
  llvm::sort(V.begin(), V.end(), [](const auto *L, const auto *R) {
    return L->getName() < R->getName();
  });
  return {std::move(V)};
}

class AMDGPULowerModuleLDS {
  const AMDGPUTargetMachine &TM;

  static void
  removeLocalVarsFromUsedLists(Module &M,
                               const DenseSet<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));

    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });

    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }

  static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen.
    // A totally robust solution would be a function with the same semantics
    // as llvm.donothing that takes a pointer to the instance and is lowered
    // to a no-op after LDS is allocated, but that is not presently necessary.

    // This intrinsic is eliminated shortly before instruction selection. It
    // does not suffice to indicate to ISel that a given global which is not
    // immediately used by the kernel must still be allocated by it. An
    // equivalent target specific intrinsic which lasts until immediately after
    // codegen would suffice for that, but one would still need to ensure that
    // the variables are allocated in the anticipated order.
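    //
    // The marker takes roughly this form in the IR (a sketch; %gep stands for
    // the constant zero-index GEP into the struct instance built below):
    //   call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) %gep) ]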
    BasicBlock *Entry = &Func->getEntryBlock();
    IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {
        Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};

    Builder.CreateCall(
        Decl, {}, {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
  }

public:
  AMDGPULowerModuleLDS(const AMDGPUTargetMachine &TM_) : TM(TM_) {}

  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // remap from lds global to a constantexpr gep to where it has been moved to
  // for each kernel
  // an array with an element for each kernel containing where the
  // corresponding variable was remapped to

  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    // Create a ConstantArray containing the address of each Variable within
    // the kernel corresponding to LDSVarsToConstantGEP, or poison if that
    // kernel does not allocate it.
    // TODO: Drop the ptrtoint conversion

    Type *I32 = Type::getInt32Ty(Ctx);

    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());

    SmallVector<Constant *> Elements;
    for (size_t i = 0; i < Variables.size(); i++) {
      GlobalVariable *GV = Variables[i];
      auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
      if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
        auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
        Elements.push_back(elt);
      } else {
        Elements.push_back(PoisonValue::get(I32));
      }
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }

  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);

    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    Constant *Missing = PoisonValue::get(KernelOffsetsType);
    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      auto Replacement = KernelToReplacement.find(kernels[i]);
      overallConstantExprElts[i] =
          (Replacement == KernelToReplacement.end())
              ?
              Missing
              : getAddressesOfVariablesInKernel(
                    Ctx, Variables, Replacement->second.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);

    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }

  void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                 GlobalVariable *LookupTable,
                                 GlobalVariable *GV, Use &U,
                                 Value *OptionalIndex) {
    // Table is a constant array of the same length as OrderedKernels
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {
        ConstantInt::get(I32, 0),
        tableKernelIndex,
    };
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());

    Value *loaded = Builder.CreateLoad(I32, Address);

    Value *replacement =
        Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());

    U.set(replacement);
  }

  void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {

    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];

      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;

        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }

  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {

    DenseSet<Function *> KernelSet;

    if (VariableSet.empty())
      return KernelSet;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }

    return KernelSet;
  }

  static GlobalVariable *
  chooseBestVariableForModuleStrategy(const DataLayout &DL,
                                      VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels

    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;

      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes module scope variable less attractive
        if (UserCount < Other.UserCount) {
          return true;
        }
        if (UserCount > Other.UserCount) {
          return false;
        }

        // Bigger makes module scope variable less attractive
        if (Size < Other.Size) {
          return false;
        }

        if (Size > Other.Size) {
          return true;
        }

        // Arbitrary but consistent
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;

    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // A variable reachable by only one kernel is best lowered with kernel
        // strategy
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }

    return MostUsed.GV;
  }

  static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
                                       uint32_t Address) {
    // Write the specified address into metadata where it can be retrieved by
    // the assembler. Format is a half open range, [Address Address+1)
    LLVMContext &Ctx = M->getContext();
    auto *IntTy =
        M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(Ctx, {MinC, MaxC}));
  }

  DenseMap<Function *, Value *> tableKernelIndexCache;
  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
    // lowers to a read from a live in register. Emit it once in the entry
    // block to spare deduplicating it later.
    auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
    if (Inserted) {
      Function *Decl =
          Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});

      auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      IRBuilder<> Builder(&*InsertAt);

      It->second = Builder.CreateCall(Decl, {});
    }

    return It->second;
  }

  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    // Associate kernels in the set with an arbitrary but reproducible order
    // and annotate them with that order in metadata. This metadata is
    // recognised by the backend and lowered to a SGPR which can be read from
    // using amdgcn_lds_kernel_id.

    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {

      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;

        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          assert(Func.hasName()); // else fatal error earlier
          OrderedKernels.push_back(&Func);
        }
      }

      // Put them in an arbitrary but reproducible order
      OrderedKernels = sortByName(std::move(OrderedKernels));

      // Annotate the kernels with their order in this vector
      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bit keeps it in one SGPR.
        // > 2**32 kernels won't fit on the GPU
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i)),
        };
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }

  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {

    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration of this loop assigns exactly one global variable to
      // exactly one of the implementation strategies.

      GlobalVariable *GV = K.first;
      assert(AMDGPU::isLDSVariableToLower(*GV));
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;

      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;

      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;

      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    // All LDS variables accessed indirectly have now been partitioned into
    // the distinct lowering strategies.
    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }

  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    // Create a struct to hold the ModuleScopeVariables.
    // Replace all uses of those variables from non-kernel functions with the
    // new struct instance. Replace only the uses from kernel functions that
    // will allocate this instance. That is a space optimisation - kernels
    // that use a subset of the module scope struct and do not need to
    // allocate it for indirect calls will only allocate the subset they use
    // (they do so as part of the per-kernel lowering).
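    //
    // The replacement instance looks roughly like the following (a sketch;
    // field order and padding are decided by createLDSVariableReplacement):
    //   %llvm.amdgcn.module.lds.t = type { ...fields and padding arrays... }
    //   @llvm.amdgcn.module.lds = internal addrspace(3) global
    //       %llvm.amdgcn.module.lds.t poison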
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }

    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                                ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                    cast<Constant>(ModuleScopeReplacement.SGV),
                                    PointerType::getUnqual(Ctx)))});

    // module.lds will be allocated at zero in any kernel that allocates it
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    // historic
    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Replace all uses of module scope variable from non-kernel functions
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          if (!I) {
            return false;
          }
          Function *F = I->getFunction();
          return !isKernelLDS(F);
        });

    // Replace uses of module scope variable from kernel functions that
    // allocate the module scope variable, otherwise leave them unchanged.
    // Record on each kernel whether the module scope global is used by it

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              if (!I) {
                return false;
              }
              Function *F = I->getFunction();
              return F == &Func;
            });

        markUsedByKernel(&Func, ModuleScopeReplacement.SGV);
      }
    }

    return ModuleScopeReplacement.SGV;
  }

  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {

    // Create a struct for each kernel for the non-module-scope variables.

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      // Allocate variables that are used directly in this struct, to get
      // alignment aware allocation and a predictable frame size.
      for (auto &v : LDSUsesInfo.direct_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Allocate variables that are accessed indirectly, so that a lookup of
      // this struct instance can find them from nested functions.
      for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Variables allocated in module lds must all resolve to that struct,
      // not to the per-kernel instance.
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        for (GlobalVariable *v : ModuleScopeVariables) {
          KernelUsedVariables.erase(v);
        }
      }

      if (KernelUsedVariables.empty()) {
        // Either used no LDS, or the LDS it used was all in the module struct
        // or dynamically sized
        continue;
      }

      // The association between kernel function and LDS struct is done by
      // symbol name, which only works if the function in question has a
      // name. This is not expected to be a problem in practice as kernels
      // are called by name, making anonymous ones (which are named by the
      // backend) difficult to use. This does mean that llvm test cases need
      // to name the kernels.
      if (!Func.hasName()) {
        report_fatal_error("Anonymous kernels cannot use LDS variables");
      }

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // If any indirect uses, create a direct use to ensure allocation
      // TODO: Simpler to unconditionally mark used but that regresses
      // codegen in test/CodeGen/AMDGPU/noclobber-barrier.ll
      auto Accesses = LDSUsesInfo.indirect_access.find(&Func);
      if ((Accesses != LDSUsesInfo.indirect_access.end()) &&
          !Accesses->second.empty())
        markUsedByKernel(&Func, Replacement.SGV);

      // remove preserves existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      // Rewrite uses within kernel to the new struct
      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }

  static GlobalVariable *
  buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
                                        Function *func) {
    // Create a dynamic lds variable with a name associated with the passed
    // function that has the maximum alignment of any dynamic lds variable
    // reachable from this kernel. Dynamic LDS is allocated after the static
    // LDS allocation, possibly after alignment padding. The representative
    // variable created here has the maximum alignment of any other dynamic
    // variable reachable by that kernel. All dynamic LDS variables are
    // allocated at the same address in each kernel in order to provide the
    // documented aliasing semantics. Setting the alignment here allows this
    // IR pass to accurately predict the exact constant at which it will be
    // allocated.

    assert(isKernelLDS(func));

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);

    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV)) {
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
      }
    };

    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
      UpdateMaxAlignment(GV);
    }

    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
      UpdateMaxAlignment(GV);
    }

    assert(func->hasName()); // Checked by caller
    auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn."
              + func->getName() + ".dynlds"),
        nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);

    assert(AMDGPU::isDynamicLDS(*N));
    return N;
  }

  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      std::vector<Constant *> newDynamicLDS;

      // Table is built in the same order as OrderedKernels
      for (auto &func : OrderedKernels) {

        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          assert(isKernelLDS(func));
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }

          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);

          KernelToCreatedDynamicLDS[func] = N;

          markUsedByKernel(func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      for (GlobalVariable *GV : DynamicVariables) {
        for (Use &U : make_early_inc_range(GV->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;

          replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }

  bool runOnModule(Module &M) {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    Changed = true; // todo: narrow this down

    // For each kernel, what variables does it access directly or through
    // callees
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

    // For each variable accessed through callees, which kernels access it
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second) {
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
      }
    }

    // Partition variables accessed indirectly into the different strategies
    DenseSet<GlobalVariable *> ModuleScopeVariables;
    DenseSet<GlobalVariable *> TableLookupVariables;
    DenseSet<GlobalVariable *> KernelAccessVariables;
    DenseSet<GlobalVariable *> DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables,
        TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // If the kernel accesses a variable that is going to be stored in the
    // module instance through a call then that kernel needs to allocate the
    // module instance
    const DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    const DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);

    const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
        lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
                                        KernelsThatAllocateModuleLDS,
                                        MaybeModuleScopeStruct);

    // Lower zero cost accesses to the kernel instances just created
    for (auto &GV : KernelAccessVariables) {
      auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
      assert(funcs.size() == 1); // Only one kernel can access it
      LDSVariableReplacement Replacement =
          KernelToReplacement[*(funcs.begin())];

      DenseSet<GlobalVariable *> Vec;
      Vec.insert(GV);

      replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
        return isa<Instruction>(U.getUser());
      });
    }

    // The ith element of this vector is kernel id i
    std::vector<Function *> OrderedKernels =
        assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
                                      KernelsThatIndirectlyAllocateDynamicLDS);

    if (!KernelsThatAllocateTableLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);

      // The order must be consistent between lookup table and accesses to
      // lookup table
      auto TableLookupVariablesOrdered =
          sortByName(std::vector<GlobalVariable *>(
              TableLookupVariables.begin(), TableLookupVariables.end()));

      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);

      // Strip amdgpu-no-lds-kernel-id from all functions reachable from the
      // kernel. We may have inferred this wasn't used prior to the pass.
      //
      // TODO: We could filter out subgraphs that do not access LDS globals.
      for (Function *F : KernelsThatAllocateTableLDS)
        removeFnAttrFromReachable(CG, F, {"amdgpu-no-lds-kernel-id"});
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);

    // All kernel frames have been allocated. Calculate and record the
    // addresses.
    {
      const DataLayout &DL = M.getDataLayout();

      for (Function &Func : M.functions()) {
        if (Func.isDeclaration() || !isKernelLDS(&Func))
          continue;

        // All three of these are optional. The first variable is allocated at
        // zero. They are allocated by AMDGPUMachineFunction as one block.
        // Layout:
        // {
        //   module.lds
        //   alignment padding
        //   kernel instance
        //   alignment padding
        //   dynamic lds variables
        // }

        const bool AllocateModuleScopeStruct =
            MaybeModuleScopeStruct &&
            KernelsThatAllocateModuleLDS.contains(&Func);

        auto Replacement = KernelToReplacement.find(&Func);
        const bool AllocateKernelScopeStruct =
            Replacement != KernelToReplacement.end();

        const bool AllocateDynamicVariable =
            KernelToCreatedDynamicLDS.contains(&Func);

        uint32_t Offset = 0;

        if (AllocateModuleScopeStruct) {
          // Allocated at zero, recorded once on construction, not once per
          // kernel
          Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
        }

        if (AllocateKernelScopeStruct) {
          GlobalVariable *KernelStruct = Replacement->second.SGV;
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
          recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
          Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
        }

        // If there is dynamic allocation, the alignment needed is included in
        // the static frame size. There may be no reference to the dynamic
        // variable in the kernel itself, so without including it here, that
        // alignment padding could be missed.
        if (AllocateDynamicVariable) {
          GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
          recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
        }

        if (Offset != 0) {
          (void)TM; // TODO: Account for target maximum LDS
          std::string Buffer;
          raw_string_ostream SS{Buffer};
          SS << format("%u", Offset);

          // Instead of explicitly marking kernels that access dynamic
          // variables using special case metadata, annotate with
          // min-lds == max-lds, i.e. that there is no more space available
          // for allocating more static LDS variables. That is the right
          // condition to prevent allocating more variables which would
          // collide with the addresses assigned to dynamic variables.
          if (AllocateDynamicVariable)
            SS << format(",%u", Offset);

          Func.addFnAttr("amdgpu-lds-size", Buffer);
        }
      }
    }

    for (auto &GV : make_early_inc_range(M.globals()))
      if (AMDGPU::isLDSVariableToLower(GV)) {
        // probably want to remove from used lists
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
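  // For example, a 12 byte LDS variable such as [3 x i32] (natural align 4)
  // is raised to align 16 below so that a b96 or b128 access remains legal.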
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and map from
    // those variables to ConstantExprGEP.
    // Variables may be introduced to meet alignment requirements. No aliasing
    // metadata is useful for these as they have no uses. Erased before return.

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument which varies when changing how they
      // are identified, leading to spurious test breakage.
      auto Sorted = sortByName(std::vector<GlobalVariable *>(
          LDSVarsToTransform.begin(), LDSVarsToTransform.end()));

      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o + (a - (o % a)) ) % a == 0
          //      (offset + Padding ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage,
              PoisonValue::get(ATy), "", nullptr, GlobalValue::NotThreadLocal,
              AMDGPUAS::LOCAL_ADDRESS, false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, PoisonValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }

  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      const LDSVariableReplacement &Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack... we need to insert the aliasing info in a predictable order
    // for lit tests.
    // Would like to have them in a stable order already, ideally the same
    // order they get allocated, which might mean an ordered set container.
    auto LDSVarsToTransform = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end()));

    // Create alias.scope and their lists. Each field in the new structure
    // does not alias with all other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of ith variable with a constantexpr to the corresponding
    // field of the instance that will be allocated by AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP.at(GV);

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }

  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations can work on pointers, but check
        // anyway in case that changes or we encounter one via a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

class AMDGPULowerModuleLDSLegacy : public ModulePass {
public:
  const AMDGPUTargetMachine *TM;
  static char ID;

  AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine *TM_ = nullptr)
      : ModulePass(ID), TM(TM_) {
    initializeAMDGPULowerModuleLDSLegacyPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (!TM)
      AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    if (!TM) {
      auto &TPC = getAnalysis<TargetPassConfig>();
      TM = &TPC.getTM<AMDGPUTargetMachine>();
    }

    return AMDGPULowerModuleLDS(*TM).runOnModule(M);
  }
};

} // namespace
char AMDGPULowerModuleLDSLegacy::ID = 0;

char &llvm::AMDGPULowerModuleLDSLegacyPassID = AMDGPULowerModuleLDSLegacy::ID;

INITIALIZE_PASS_BEGIN(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                      "Lower uses of LDS variables from non-kernel functions",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                    "Lower uses of LDS variables from non-kernel functions",
                    false, false)

ModulePass *
llvm::createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPULowerModuleLDSLegacy(TM);
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS(TM).runOnModule(M) ? PreservedAnalyses::none()
                                                 : PreservedAnalyses::all();
}