//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates local data store, LDS, uses from non-kernel functions.
// LDS is contiguous memory allocated per kernel execution.
//
// Background.
//
// The programming model is global variables, or equivalently function local
// static variables, accessible from kernels or other functions. For uses from
// kernels this is straightforward - assign an integer to the kernel for the
// memory required by all the variables combined, allocate them within that.
// For uses from functions there are performance tradeoffs to choose between.
//
// This model means the GPU runtime can specify the amount of memory allocated.
// If this is more than the kernel assumed, the excess can be made available
// using a language specific feature, which IR represents as a variable with
// no initializer. This feature is referred to here as "Dynamic LDS" and is
// lowered slightly differently from the normal case.
//
// Consequences of this GPU feature:
// - memory is limited and exceeding it halts compilation
// - a global accessed by one kernel exists independent of other kernels
// - a global exists independent of simultaneous execution of the same kernel
// - the address of the global may be different in different kernels as they
//   do not alias, which permits only allocating variables they use
// - if the address is allowed to differ, functions need help to find it
//
// Uses from kernels are implemented here by grouping them in a per-kernel
// struct instance. This duplicates the variables, accurately modelling their
// aliasing properties relative to a single global representation. It also
// permits control over alignment via padding.
//
// Uses from functions are more complicated and the primary purpose of this
// IR pass. Several different lowerings are chosen between to meet requirements
// to avoid allocating any LDS where it is not necessary, as that impacts
// occupancy and may fail the compilation, while not imposing overhead on a
// feature whose primary advantage over global memory is performance. The basic
// design goal is to avoid one kernel imposing overhead on another.
//
// Implementation.
//
// LDS variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
// Non-undef initializers are not yet implemented for LDS.
//
// LDS variables that are always allocated at the same address can be found
// by lookup at that address. Otherwise runtime information/cost is required.
//
// The simplest strategy possible is to group all LDS variables in a single
// struct and allocate that struct in every kernel such that the original
// variables are always at the same address. LDS is however a limited resource
// so this strategy is unusable in practice. It is not implemented here.
//
//  Strategy | Precise allocation | Zero runtime cost | General purpose |
//   --------+--------------------+-------------------+-----------------+
//    Module |                 No |               Yes |             Yes |
//     Table |                Yes |                No |             Yes |
//    Kernel |                Yes |               Yes |              No |
//    Hybrid |                Yes |           Partial |             Yes |
//
// "Module" spends LDS memory to save cycles. "Table" spends cycles and global
// memory to save LDS. "Kernel" is as fast as kernel allocation but only works
// for variables that are known reachable from a single kernel. "Hybrid" picks
// between all three. When forced to choose between LDS and cycles we minimise
// LDS use.

// The "module" lowering implemented here finds LDS variables which are used by
// non-kernel functions and creates a new struct with a field for each of those
// LDS variables. Variables that are only used from kernels are excluded.
// Kernels that do not use this struct are annotated with the attribute
// amdgpu-elide-module-lds which allows the back end to elide the allocation.
//
// The "table" lowering implemented here has three components.
// First kernels are assigned a unique integer identifier which is available in
// functions it calls through the intrinsic amdgcn_lds_kernel_id. The integer
// is passed through a specific SGPR, thus works with indirect calls.
// Second, each kernel allocates LDS variables independent of other kernels and
// writes the addresses it chose for each variable into an array in consistent
// order. If the kernel does not allocate a given variable, it writes undef to
// the corresponding array location. These arrays are written to a constant
// table in the order matching the kernel unique integer identifier.
// Third, uses from non-kernel functions are replaced with a table lookup using
// the intrinsic function to find the address of the variable.
//
// "Kernel" lowering is only applicable for variables that are unambiguously
// reachable from exactly one kernel. For those cases, accesses to the variable
// can be lowered to ConstantExpr address of a struct instance specific to that
// one kernel. This is zero cost in space and in compute. It will raise a fatal
// error on any variable that might be reachable from multiple kernels and is
// thus most easily used as part of the hybrid lowering strategy.
//
// Hybrid lowering is a mixture of the above. It uses the zero cost kernel
// lowering where it can. It lowers the variable accessed by the greatest
// number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
// without incurring LDS memory overhead are. The remaining ones are lowered
// via table.
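//
// For illustration (hypothetical variables and kernels): given @a reachable
// from kernels {k0, k1, k2}, @b reachable from {k0, k1} and @c reachable from
// exactly one kernel, hybrid lowers @c with the "kernel" strategy, picks @a
// as the most widely used variable and lowers it with the "module" strategy,
// then also lowers @b via the module struct because {k0, k1} is a subset of
// the kernels that already allocate that struct, so no extra LDS is spent.
// A variable whose kernel set is not such a subset falls back to the table.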
//
// Consequences
// - No heuristics or user controlled magic numbers, hybrid is the right choice
// - Kernels that don't use functions (or have had them all inlined) are not
//   affected by any lowering for kernels that do.
// - Kernels that don't make indirect function calls are not affected by those
//   that do.
// - Variables which are used by lots of kernels, e.g. those injected by a
//   language runtime in most kernels, are expected to have no overhead
// - Implementations that instantiate templates per-kernel where those templates
//   use LDS are expected to hit the "Kernel" lowering strategy
// - The runtime properties impose a cost in compiler implementation complexity
//
// Dynamic LDS implementation
// Dynamic LDS is lowered similarly to the "table" strategy above and uses the
// same intrinsic to identify which kernel is at the root of the dynamic call
// graph. This relies on the specified behaviour that all dynamic LDS variables
// alias one another, i.e. are at the same address, with respect to a given
// kernel. Therefore this pass creates new dynamic LDS variables for each kernel
// that allocates any dynamic LDS and builds a table of addresses out of those.
// The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
// The corresponding optimisation for "kernel" lowering where the table lookup
// is elided is not implemented.
//
//
// Implementation notes / limitations
// A single LDS global variable represents an instance per kernel that can
// reach said variable. This pass essentially specialises such variables per
// kernel. Handling ConstantExpr during the pass complicated this significantly
// so now all ConstantExpr uses of LDS variables are expanded to instructions.
// This may need amending when implementing non-undef initialisers.
//
// Lowering is split between this IR pass and the back end. This pass chooses
// where given variables should be allocated and marks them with metadata,
// MD_absolute_symbol. The backend places the variables in coincidentally the
// same location and raises a fatal error if something has gone awry. This works
// in practice because the only pass between this one and the backend that
// changes LDS is PromoteAlloca and the changes it makes do not conflict.
//
// Addresses are written to constant global arrays based on the same metadata.
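//
// For example (illustrative IR sketch), a variable placed at address four
// carries the half open range [4, 5) as metadata:
//   @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global
//       %llvm.amdgcn.kernel.k0.lds.t undef, align 4, !absolute_symbol !0
//   !0 = !{i32 4, i32 5}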
//
// The backend lowers LDS variables in the order of traversal of the function.
// This is at odds with the deterministic layout required. The workaround is to
// allocate the fixed-address variables immediately upon starting the function
// where they can be placed as intended. This requires a means of mapping from
// the function to the variables that it allocates. For the module scope lds,
// this is via metadata indicating whether the variable is not required. If a
// pass deletes that metadata, a fatal error on disagreement with the absolute
// symbol metadata will occur. For kernel scope and dynamic, this is by _name_
// correspondence between the function and the variable. It requires the
// kernel to have a name (which is only a limitation for tests in practice) and
// for nothing to rename the corresponding symbols. This is a hazard if the pass
// is run multiple times during debugging. Alternative schemes considered all
// involve bespoke metadata.
//
// If the name correspondence can be replaced, multiple distinct kernels that
// have the same memory layout can map to the same kernel id (as the address
// itself is handled by the absolute symbol metadata) and that will allow more
// uses of the "kernel" style faster lowering and reduce the size of the lookup
// tables.
//
// There is a test that checks this does not fire for a graphics shader. This
// lowering is expected to work for graphics if the isKernel test is changed.
//
// The current markUsedByKernel is sufficient for PromoteAlloca but is elided
// before codegen. Replacing this with an equivalent intrinsic which lasts until
// shortly after the machine function lowering of LDS would help break the name
// mapping. The other part needed is probably to amend PromoteAlloca to embed
// the LDS variables it creates in the same struct created here. That avoids the
// current hazard where a PromoteAlloca LDS variable might be allocated before
// the kernel scope (and thus error on the address check). Given a new invariant
// that no LDS variables exist outside of the structs managed here, and an
// intrinsic that lasts until after the LDS frame lowering, it should be
// possible to drop the name mapping and fold equivalent memory layouts.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

#include <tuple>
#include <vector>

#include <cstdio>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

namespace {

cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

enum class LoweringKind { module, table, kernel, hybrid };
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));
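// The strategy can be forced for experiments, e.g. (hypothetical invocation):
//   opt -amdgpu-lower-module-lds-strategy=table ...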
bool isKernelLDS(const Function *F) {
  // Some weirdness here. AMDGPU::isKernelCC does not call into
  // AMDGPU::isKernel with the calling conv, it instead calls into
  // isModuleEntryFunction which returns true for more calling conventions
  // than AMDGPU::isKernel does. There's a FIXME on AMDGPU::isKernel.
  // There's also a test that checks that the LDS lowering does not hit on
  // a graphics shader, denoted amdgpu_ps, so stay with the limited case.
  // Putting LDS in the name of the function to draw attention to this.
  return AMDGPU::isKernel(F->getCallingConv());
}

class AMDGPULowerModuleLDS : public ModulePass {

  static void
  removeLocalVarsFromUsedLists(Module &M,
                               const DenseSet<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));

    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });

    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen.
    // A totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    // This intrinsic is eliminated shortly before instruction selection. It
    // does not suffice to indicate to ISel that a given global which is not
    // immediately used by the kernel must still be allocated by it. An
    // equivalent target specific intrinsic which lasts until immediately after
    // codegen would suffice for that, but one would still need to ensure that
    // the variables are allocated in the anticipated order.

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }
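  // The marker emitted at the kernel entry looks roughly like the following
  // (illustrative sketch; the zero-index GEP usually constant-folds to the
  // global itself):
  //   call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3)
  //                                 @llvm.amdgcn.module.lds) ]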
  static bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M) {
    // Constants are uniqued within LLVM. A ConstantExpr referring to a LDS
    // global may have uses from multiple different functions as a result.
    // This pass specialises LDS variables with respect to the kernel that
    // allocates them.

    // This is semantically equivalent to (unimplemented as it would be slow):
    // for (auto &F : M.functions())
    //   for (auto &BB : F)
    //     for (auto &I : BB)
    //       for (Use &Op : I.operands())
    //         if (constantExprUsesLDS(Op))
    //           replaceConstantExprInFunction(I, Op);

    SmallVector<Constant *> LDSGlobals;
    for (auto &GV : M.globals())
      if (AMDGPU::isLDSVariableToLower(GV))
        LDSGlobals.push_back(&GV);

    return convertUsersOfConstantsToInstructions(LDSGlobals);
  }

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;

  using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;

  static void getUsesOfLDSByFunction(CallGraph const &CG, Module &M,
                                     FunctionVariableMap &kernels,
                                     FunctionVariableMap &functions) {

    // Get uses from the current function, excluding uses by called functions
    // Two output variables to avoid walking the globals list twice
    for (auto &GV : M.globals()) {
      if (!AMDGPU::isLDSVariableToLower(GV)) {
        continue;
      }

      for (User *V : GV.users()) {
        if (auto *I = dyn_cast<Instruction>(V)) {
          Function *F = I->getFunction();
          if (isKernelLDS(F)) {
            kernels[F].insert(&GV);
          } else {
            functions[F].insert(&GV);
          }
        }
      }
    }
  }

  struct LDSUsesInfoTy {
    FunctionVariableMap direct_access;
    FunctionVariableMap indirect_access;
  };
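  // For example (hypothetical names): if kernel @k calls function @f and @f
  // loads from LDS variable @v, then after analysis direct_access[@f] and
  // indirect_access[@k] both contain @v while direct_access[@k] does not.
  // If some function additionally has its address taken, every function that
  // makes an indirect call is conservatively assumed to reach everything the
  // escaped function accesses, and so is every kernel calling it.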
  static LDSUsesInfoTy getTransitiveUsesOfLDS(CallGraph const &CG, Module &M) {

    FunctionVariableMap direct_map_kernel;
    FunctionVariableMap direct_map_function;
    getUsesOfLDSByFunction(CG, M, direct_map_kernel, direct_map_function);

    // Collect variables that are used by functions whose address has escaped
    DenseSet<GlobalVariable *> VariablesReachableThroughFunctionPointer;
    for (Function &F : M.functions()) {
      if (!isKernelLDS(&F))
        if (F.hasAddressTaken(nullptr,
                              /* IgnoreCallbackUses */ false,
                              /* IgnoreAssumeLikeCalls */ false,
                              /* IgnoreLLVMUsed */ true,
                              /* IgnoreArcAttachedCall */ false)) {
          set_union(VariablesReachableThroughFunctionPointer,
                    direct_map_function[&F]);
        }
    }

    auto functionMakesUnknownCall = [&](const Function *F) -> bool {
      assert(!F->isDeclaration());
      for (CallGraphNode::CallRecord R : *CG[F]) {
        if (!R.second->getFunction()) {
          return true;
        }
      }
      return false;
    };

    // Work out which variables are reachable through function calls
    FunctionVariableMap transitive_map_function = direct_map_function;

    // If the function makes any unknown call, assume the worst case that it
    // can access all variables accessed by functions whose address escaped
    for (Function &F : M.functions()) {
      if (!F.isDeclaration() && functionMakesUnknownCall(&F)) {
        if (!isKernelLDS(&F)) {
          set_union(transitive_map_function[&F],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    // Direct implementation of collecting all variables reachable from each
    // function
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || isKernelLDS(&Func))
        continue;

      DenseSet<Function *> seen; // catches cycles
      SmallVector<Function *, 4> wip{&Func};

      while (!wip.empty()) {
        Function *F = wip.pop_back_val();

        // Can accelerate this by referring to transitive map for functions
        // that have already been computed, with more care than this
        set_union(transitive_map_function[&Func], direct_map_function[F]);

        for (CallGraphNode::CallRecord R : *CG[F]) {
          Function *ith = R.second->getFunction();
          if (ith) {
            if (!seen.contains(ith)) {
              seen.insert(ith);
              wip.push_back(ith);
            }
          }
        }
      }
    }

    // direct_map_kernel lists which variables are used by the kernel
    // find the variables which are used through a function call
    FunctionVariableMap indirect_map_kernel;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      for (CallGraphNode::CallRecord R : *CG[&Func]) {
        Function *ith = R.second->getFunction();
        if (ith) {
          set_union(indirect_map_kernel[&Func], transitive_map_function[ith]);
        } else {
          set_union(indirect_map_kernel[&Func],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    return {std::move(direct_map_kernel), std::move(indirect_map_kernel)};
  }

  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // Remap each LDS global to a constantexpr GEP into the struct it has been
  // moved to in a given kernel, and build an array with an element per kernel
  // recording where the corresponding variable was remapped to.

  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    // Create a ConstantArray containing the address of each Variable within
    // the kernel corresponding to LDSVarsToConstantGEP, or poison if that
    // kernel does not allocate it
    // TODO: Drop the ptrtoint conversion

    Type *I32 = Type::getInt32Ty(Ctx);

    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());

    SmallVector<Constant *> Elements;
    for (size_t i = 0; i < Variables.size(); i++) {
      GlobalVariable *GV = Variables[i];
      if (LDSVarsToConstantGEP.count(GV) != 0) {
        auto elt = ConstantExpr::getPtrToInt(LDSVarsToConstantGEP[GV], I32);
        Elements.push_back(elt);
      } else {
        Elements.push_back(PoisonValue::get(I32));
      }
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }

  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);

    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      LDSVariableReplacement Replacement = KernelToReplacement[kernels[i]];
      overallConstantExprElts[i] = getAddressesOfVariablesInKernel(
          Ctx, Variables, Replacement.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);

    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }
  void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                 GlobalVariable *LookupTable,
                                 GlobalVariable *GV, Use &U,
                                 Value *OptionalIndex) {
    // Table is a constant array of the same length as OrderedKernels
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {
        ConstantInt::get(I32, 0),
        tableKernelIndex,
    };
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());

    Value *loaded = Builder.CreateLoad(I32, Address);

    Value *replacement =
        Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());

    U.set(replacement);
  }

  void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {

    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];

      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;

        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }

  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {

    DenseSet<Function *> KernelSet;

    if (VariableSet.empty())
      return KernelSet;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }

    return KernelSet;
  }
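  // Shape of the pieces built above (illustrative sketch for two kernels and
  // two table-lowered variables; addresses hypothetical, poison marks a
  // variable the second kernel does not allocate):
  //   @llvm.amdgcn.lds.offset.table = internal addrspace(4) constant
  //     [2 x [2 x i32]] [[2 x i32] [i32 0, i32 8],
  //                      [2 x i32] [i32 0, i32 poison]]
  // and each rewritten non-kernel use of the variable in table column 1
  // expands to approximately:
  //   %id = call i32 @llvm.amdgcn.lds.kernel.id()
  //   %gep = getelementptr inbounds [2 x [2 x i32]],
  //            ptr addrspace(4) @llvm.amdgcn.lds.offset.table,
  //            i32 0, i32 %id, i32 1
  //   %addr = load i32, ptr addrspace(4) %gep
  //   %ptr = inttoptr i32 %addr to ptr addrspace(3)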
  static GlobalVariable *
  chooseBestVariableForModuleStrategy(const DataLayout &DL,
                                      VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels

    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;

      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes module scope variable less attractive
        if (UserCount < Other.UserCount) {
          return true;
        }
        if (UserCount > Other.UserCount) {
          return false;
        }

        // Bigger makes module scope variable less attractive
        if (Size < Other.Size) {
          return false;
        }

        if (Size > Other.Size) {
          return true;
        }

        // Arbitrary but consistent
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;

    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // A variable reachable by only one kernel is best lowered with kernel
        // strategy
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }

    return MostUsed.GV;
  }

  static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
                                       uint32_t Address) {
    // Write the specified address into metadata where it can be retrieved by
    // the assembler. Format is a half open range, [Address Address+1)
    LLVMContext &Ctx = M->getContext();
    auto *IntTy =
        M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(Ctx, {MinC, MaxC}));
  }

  DenseMap<Function *, Value *> tableKernelIndexCache;
  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
    // lowers to a read from a live in register. Emit it once in the entry
    // block to spare deduplicating it later.
    if (tableKernelIndexCache.count(F) == 0) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      FunctionType *FTy = FunctionType::get(Type::getInt32Ty(Ctx), {});
      Function *Decl =
          Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});

      BasicBlock::iterator it =
          F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      Instruction &i = *it;
      Builder.SetInsertPoint(&i);

      tableKernelIndexCache[F] = Builder.CreateCall(FTy, Decl, {});
    }

    return tableKernelIndexCache[F];
  }

  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    // Associate kernels in the set with an arbitrary but reproducible order
    // and annotate them with that order in metadata. This metadata is
    // recognised by the backend and lowered to a SGPR which can be read from
    // using amdgcn_lds_kernel_id.

    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {

      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;

        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          assert(Func.hasName()); // else fatal error earlier
          OrderedKernels.push_back(&Func);
        }
      }

      // Put them in an arbitrary but reproducible order
      llvm::sort(OrderedKernels.begin(), OrderedKernels.end(),
                 [](const Function *lhs, const Function *rhs) -> bool {
                   return lhs->getName() < rhs->getName();
                 });

      // Annotate the kernels with their order in this vector
      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i)),
        };
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }
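  // A kernel assigned id 2 is annotated roughly as follows (illustrative
  // sketch):
  //   define amdgpu_kernel void @k2() !llvm.amdgcn.lds.kernel.id !0 { ... }
  //   !0 = !{i32 2}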
  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {

    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration of this loop assigns exactly one global variable to
      // exactly one of the implementation strategies.

      GlobalVariable *GV = K.first;
      assert(AMDGPU::isLDSVariableToLower(*GV));
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;

      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;

      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;

      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    // All LDS variables accessed indirectly have now been partitioned into
    // the distinct lowering strategies.
    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }
  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    // Create a struct to hold the ModuleScopeVariables.
    // Replace all uses of those variables from non-kernel functions with the
    // new struct instance. Replace only the uses from kernel functions that
    // will allocate this instance. That is a space optimisation - kernels that
    // use a subset of the module scope struct and do not need to allocate it
    // for indirect calls will only allocate the subset they use (they do so as
    // part of the per-kernel lowering).
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }

    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                            ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                cast<Constant>(ModuleScopeReplacement.SGV),
                                Type::getInt8PtrTy(Ctx)))});

    // module.lds will be allocated at zero in any kernel that allocates it
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    // historic
    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Replace all uses of module scope variable from non-kernel functions
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          if (!I) {
            return false;
          }
          Function *F = I->getFunction();
          return !isKernelLDS(F);
        });

    // Replace uses of module scope variable from kernel functions that
    // allocate the module scope variable, otherwise leave them unchanged
    // Record on each kernel whether the module scope global is used by it

    IRBuilder<> Builder(Ctx);

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              if (!I) {
                return false;
              }
              Function *F = I->getFunction();
              return F == &Func;
            });

        markUsedByKernel(Builder, &Func, ModuleScopeReplacement.SGV);

      } else {
        markElideModuleLDS(Func);
      }
    }

    return ModuleScopeReplacement.SGV;
  }
  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {

    // Create a struct for each kernel for the non-module-scope variables.

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      // Allocating variables that are used directly in this struct to get
      // alignment aware allocation and predictable frame size.
      for (auto &v : LDSUsesInfo.direct_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Allocating variables that are accessed indirectly so that a lookup of
      // this struct instance can find them from nested functions.
      for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Variables allocated in module lds must all resolve to that struct,
      // not to the per-kernel instance.
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        for (GlobalVariable *v : ModuleScopeVariables) {
          KernelUsedVariables.erase(v);
        }
      }

      if (KernelUsedVariables.empty()) {
        // Either used no LDS, or the LDS it used was all in the module struct
        // or dynamically sized
        continue;
      }

      // The association between kernel function and LDS struct is done by
      // symbol name, which only works if the function in question has a name.
      // This is not expected to be a problem in practice as kernels are
      // called by name, making anonymous ones (which are named by the
      // backend) difficult to use. This does mean that llvm test cases need
      // to name the kernels.
      if (!Func.hasName()) {
        report_fatal_error("Anonymous kernels cannot use LDS variables");
      }

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // Removing the variables from the used lists preserves existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      // Rewrite uses within kernel to the new struct
      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }
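  // For a kernel named "k" using two non-dynamic LDS variables, the per-kernel
  // lowering produces roughly (illustrative sketch, types hypothetical):
  //   %llvm.amdgcn.kernel.k.lds.t = type { [8 x i8], i32 }
  //   @llvm.amdgcn.kernel.k.lds = internal addrspace(3) global
  //       %llvm.amdgcn.kernel.k.lds.t undef, align 8
  // with uses inside @k rewritten to constantexpr GEPs into that instance.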
  static GlobalVariable *
  buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
                                        Function *func) {
    // Create a dynamic lds variable with a name associated with the passed
    // function that has the maximum alignment of any dynamic lds variable
    // reachable from this kernel. Dynamic LDS is allocated after the static
    // LDS allocation, possibly after alignment padding. The representative
    // variable created here has the maximum alignment of any other dynamic
    // variable reachable by that kernel. All dynamic LDS variables are
    // allocated at the same address in each kernel in order to provide the
    // documented aliasing semantics. Setting the alignment here allows this
    // IR pass to accurately predict the exact constant at which it will be
    // allocated.

    assert(isKernelLDS(func));

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);

    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV)) {
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
      }
    };

    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
      UpdateMaxAlignment(GV);
    }

    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
      UpdateMaxAlignment(GV);
    }

    assert(func->hasName()); // Checked by caller
    auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr,
        GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);

    assert(AMDGPU::isDynamicLDS(*N));
    return N;
  }

  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      std::vector<Constant *> newDynamicLDS;

      // Table is built in the same order as OrderedKernels
      for (auto &func : OrderedKernels) {

        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          assert(isKernelLDS(func));
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }

          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);

          KernelToCreatedDynamicLDS[func] = N;

          markUsedByKernel(Builder, func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      for (GlobalVariable *GV : DynamicVariables) {
        for (Use &U : make_early_inc_range(GV->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;

          replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }
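  // For a kernel "k" that reaches dynamic LDS, the pieces fit together
  // roughly as follows (illustrative sketch):
  //   @llvm.amdgcn.k.dynlds = external addrspace(3) global [0 x i8], align 8
  //   @llvm.amdgcn.dynlds.offset.table = internal addrspace(4) constant
  //       [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.k.dynlds
  //                                to i32)]
  // Non-kernel uses of any dynamic LDS variable then load the address for the
  // current kernel from that table via llvm.amdgcn.lds.kernel.id.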
  static bool canElideModuleLDS(const Function &F) {
    return F.hasFnAttribute("amdgpu-elide-module-lds");
  }

  static void markElideModuleLDS(Function &F) {
    F.addFnAttr("amdgpu-elide-module-lds");
  }

  bool runOnModule(Module &M) override {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    Changed = true; // todo: narrow this down

    // For each kernel, what variables does it access directly or through
    // callees
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

    // For each variable accessed through callees, which kernels access it
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second) {
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
      }
    }

    // Partition variables accessed indirectly into the different strategies
    DenseSet<GlobalVariable *> ModuleScopeVariables;
    DenseSet<GlobalVariable *> TableLookupVariables;
    DenseSet<GlobalVariable *> KernelAccessVariables;
    DenseSet<GlobalVariable *> DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // If the kernel accesses a variable that is going to be stored in the
    // module instance through a call then that kernel needs to allocate the
    // module instance
    DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);

    DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
        lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
                                        KernelsThatAllocateModuleLDS,
                                        MaybeModuleScopeStruct);

    // Lower zero cost accesses to the kernel instances just created
    for (auto &GV : KernelAccessVariables) {
      auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
      assert(funcs.size() == 1); // Only one kernel can access it
      LDSVariableReplacement Replacement =
          KernelToReplacement[*(funcs.begin())];

      DenseSet<GlobalVariable *> Vec;
      Vec.insert(GV);

      // TODO: Looks like a latent bug, Replacement may not be marked
      // UsedByKernel here
      replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
        return isa<Instruction>(U.getUser());
      });
    }
    // The ith element of this vector is kernel id i
    std::vector<Function *> OrderedKernels =
        assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
                                      KernelsThatIndirectlyAllocateDynamicLDS);

    if (!KernelsThatAllocateTableLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        markUsedByKernel(Builder, OrderedKernels[i],
                         KernelToReplacement[OrderedKernels[i]].SGV);
      }

      // The order must be consistent between lookup table and accesses to
      // lookup table
      std::vector<GlobalVariable *> TableLookupVariablesOrdered(
          TableLookupVariables.begin(), TableLookupVariables.end());
      llvm::sort(TableLookupVariablesOrdered.begin(),
                 TableLookupVariablesOrdered.end(),
                 [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                   return lhs->getName() < rhs->getName();
                 });

      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);

    // All kernel frames have been allocated. Calculate and record the
    // addresses.

    {
      const DataLayout &DL = M.getDataLayout();

      for (Function &Func : M.functions()) {
        if (Func.isDeclaration() || !isKernelLDS(&Func))
          continue;

        // All three of these are optional. The first variable is allocated at
        // zero. They are allocated by allocateKnownAddressLDSGlobal in the
        // following order:
        //{
        //  module.lds
        //  alignment padding
        //  kernel instance
        //  alignment padding
        //  dynamic lds variables
        //}

        const bool AllocateModuleScopeStruct =
            MaybeModuleScopeStruct && !canElideModuleLDS(Func);

        const bool AllocateKernelScopeStruct =
            KernelToReplacement.contains(&Func);

        const bool AllocateDynamicVariable =
            KernelToCreatedDynamicLDS.contains(&Func);

        uint32_t Offset = 0;

        if (AllocateModuleScopeStruct) {
          // Allocated at zero, recorded once on construction, not once per
          // kernel
          Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
        }

        if (AllocateKernelScopeStruct) {
          GlobalVariable *KernelStruct = KernelToReplacement[&Func].SGV;

          Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));

          recordLDSAbsoluteAddress(&M, KernelStruct, Offset);

          Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
        }

        if (AllocateDynamicVariable) {
          GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];

          Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));

          recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
        }
      }
    }
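    // Worked example of the layout above (hypothetical sizes): a module
    // struct of 12 bytes at address zero followed by a kernel struct with
    // align 8 places the kernel struct at alignTo(12, 8) == 16, and a dynamic
    // variable with align 16 after a 20 byte kernel struct lands at
    // alignTo(36, 16) == 48.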
    for (auto &GV : make_early_inc_range(M.globals()))
      if (AMDGPU::isLDSVariableToLower(GV)) {
        // probably want to remove from used lists
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }
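  // For example, a 12 byte [3 x i32] declared with align 4 is promoted to
  // align 16 so that a single b96/b128 access becomes legal; a 2 byte
  // variable is left at align 2.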
  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and map from
    // those variables to ConstantExprGEP
    // Variables may be introduced to meet alignment requirements. No aliasing
    // metadata is useful for these as they have no uses. Erased before return.

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument which varies when changing how they
      // are identified, leading to spurious test breakage.
      std::vector<GlobalVariable *> Sorted(LDSVarsToTransform.begin(),
                                           LDSVarsToTransform.end());
      llvm::sort(Sorted.begin(), Sorted.end(),
                 [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                   return lhs->getName() < rhs->getName();
                 });
      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o + (a - (o % a)) ) % a == 0
          //      (offset + Padding  ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }

  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      LDSVariableReplacement Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack... we need to insert the aliasing info in a predictable order for
    // lit tests. Would like to have them in a stable order already, ideally the
    // same order they get allocated, which might mean an ordered set container
    std::vector<GlobalVariable *> LDSVarsToTransform(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end());
    llvm::sort(LDSVarsToTransform.begin(), LDSVarsToTransform.end(),
               [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                 return lhs->getName() < rhs->getName();
               });

    // Create alias.scope and their lists. Each field in the new structure
    // does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of ith variable with a constantexpr to the corresponding
    // field of the instance that will be allocated by AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP[GV];

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }
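  // The effect on a rewritten access is metadata along these lines
  // (illustrative sketch for the field with scope !2 out of scopes !2 and !3):
  //   store i32 0, ptr addrspace(3) %gep, !alias.scope !4, !noalias !5
  //   !4 = !{!2}
  //   !5 = !{!3}
  // so accesses to distinct fields of the same struct can still be reordered
  // despite now sharing one underlying allocation.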
  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations work on pointers, but check anyway
        // in case that changes or we end up processing a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}