//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates local data store, LDS, uses from non-kernel functions.
// LDS is contiguous memory allocated per kernel execution.
//
// Background.
//
// The programming model is global variables, or equivalently function local
// static variables, accessible from kernels or other functions. For uses from
// kernels this is straightforward - assign an integer to the kernel for the
// memory required by all the variables combined, allocate them within that.
// For uses from functions there are performance tradeoffs to choose between.
//
// This model means the GPU runtime can specify the amount of memory allocated.
// If this is more than the kernel assumed, the excess can be made available
// using a language specific feature, which IR represents as a variable with
// no initializer. This feature is referred to here as "Dynamic LDS" and is
// lowered slightly differently to the normal case.
//
// Consequences of this GPU feature:
// - memory is limited and exceeding it halts compilation
// - a global accessed by one kernel exists independent of other kernels
// - a global exists independent of simultaneous execution of the same kernel
// - the address of the global may be different from different kernels as they
//   do not alias, which permits only allocating variables they use
// - if the address is allowed to differ, functions need help to find it
//
// Uses from kernels are implemented here by grouping them in a per-kernel
// struct instance. This duplicates the variables, accurately modelling their
// aliasing properties relative to a single global representation. It also
// permits control over alignment via padding.
//
// Uses from functions are more complicated and are the primary purpose of
// this IR pass. Several different lowerings are chosen between to meet two
// requirements: avoid allocating any LDS where it is not necessary, as that
// impacts occupancy and may fail the compilation, and avoid imposing overhead
// on a feature whose primary advantage over global memory is performance. The
// basic design goal is to avoid one kernel imposing overhead on another.
//
// Implementation.
//
// LDS variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
// Non-undef initializers are not yet implemented for LDS.
//
// LDS variables that are always allocated at the same address can be found
// by lookup at that address. Otherwise runtime information/cost is required.
//
// The simplest strategy possible is to group all LDS variables in a single
// struct and allocate that struct in every kernel such that the original
// variables are always at the same address. LDS is however a limited resource
// so this strategy is unusable in practice. It is not implemented here.
//
// Strategy | Precise allocation | Zero runtime cost | General purpose |
//  --------+--------------------+-------------------+-----------------+
//   Module |                 No |               Yes |             Yes |
//    Table |                Yes |                No |             Yes |
//   Kernel |                Yes |               Yes |              No |
//   Hybrid |                Yes |           Partial |             Yes |
//
// "Module" spends LDS memory to save cycles. "Table" spends cycles and global
// memory to save LDS. "Kernel" is as fast as kernel allocation but only works
// for variables that are known reachable from a single kernel. "Hybrid" picks
// between all three. When forced to choose between LDS and cycles we minimise
// LDS use.

// The "module" lowering implemented here finds LDS variables which are used by
// non-kernel functions and creates a new struct with a field for each of those
// LDS variables. Variables that are only used from kernels are excluded.
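//
// As an illustrative sketch (variable names hypothetical), a module with
//
//   @used_by_function = internal addrspace(3) global i32 undef
//
// accessed from a non-kernel function gains a struct instance along the
// lines of
//
//   %llvm.amdgcn.module.lds.t = type { i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 4
//
// and the accesses are rewritten to address the corresponding field.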
//
// The "table" lowering implemented here has three components.
// First, kernels are assigned a unique integer identifier which is available
// in functions they call through the intrinsic amdgcn_lds_kernel_id. The
// integer is passed through a specific SGPR, thus works with indirect calls.
// Second, each kernel allocates LDS variables independent of other kernels and
// writes the addresses it chose for each variable into an array in consistent
// order. If the kernel does not allocate a given variable, it writes poison to
// the corresponding array location. These arrays are written to a constant
// table in the order matching the kernel unique integer identifier.
// Third, uses from non-kernel functions are replaced with a table lookup using
// the intrinsic function to find the address of the variable.
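//
// For example (a sketch, names hypothetical), a non-kernel access
//
//   %val = load i32, ptr addrspace(3) @var
//
// becomes, approximately, for a table of N kernels by M variables,
//
//   %id = call i32 @llvm.amdgcn.lds.kernel.id()
//   %e = getelementptr inbounds [N x [M x i32]], ptr addrspace(4)
//       @llvm.amdgcn.lds.offset.table, i32 0, i32 %id, i32 %index
//   %a = load i32, ptr addrspace(4) %e
//   %p = inttoptr i32 %a to ptr addrspace(3)
//   %val = load i32, ptr addrspace(3) %p
//
// where %index is the position of @var in the consistent variable ordering.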
//
// "Kernel" lowering is only applicable for variables that are unambiguously
// reachable from exactly one kernel. For those cases, accesses to the variable
// can be lowered to ConstantExpr address of a struct instance specific to that
// one kernel. This is zero cost in space and in compute. It will raise a fatal
// error on any variable that might be reachable from multiple kernels and is
// thus most easily used as part of the hybrid lowering strategy.
//
// Hybrid lowering is a mixture of the above. It uses the zero cost kernel
// lowering where it can. It lowers the variable accessed by the greatest
// number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
// without incurring LDS memory overhead are lowered that way. The remaining
// ones are lowered via table.
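//
// As a sketch of the hybrid decision (variables hypothetical): given @a
// reachable from three of four kernels, @b from exactly one kernel, and @c
// from two kernels of which one does not reach @a, then @a takes the module
// strategy (most users, free for the first variable), @b the kernel strategy
// (zero cost), and @c the table strategy - its kernels are not a subset of
// @a's, so placing it in the module struct would allocate LDS in kernels
// that do not need it.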
//
// Consequences:
// - No heuristics or user controlled magic numbers, hybrid is the right choice.
// - Kernels that don't use functions (or have had them all inlined) are not
//   affected by any lowering for kernels that do.
// - Kernels that don't make indirect function calls are not affected by those
//   that do.
// - Variables which are used by lots of kernels, e.g. those injected by a
//   language runtime in most kernels, are expected to have no overhead.
// - Implementations that instantiate templates per-kernel where those templates
//   use LDS are expected to hit the "Kernel" lowering strategy.
// - The runtime properties impose a cost in compiler implementation complexity.
//
// Dynamic LDS implementation
// Dynamic LDS is lowered similarly to the "table" strategy above and uses the
// same intrinsic to identify which kernel is at the root of the dynamic call
// graph. This relies on the specified behaviour that all dynamic LDS variables
// alias one another, i.e. are at the same address, with respect to a given
// kernel. Therefore this pass creates new dynamic LDS variables for each kernel
// that allocates any dynamic LDS and builds a table of addresses out of those.
// The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
// The corresponding optimisation for "kernel" lowering where the table lookup
// is elided is not implemented.
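//
// Sketch (names hypothetical): for a kernel @k that reaches dynamic LDS, a
// representative variable
//
//   @llvm.amdgcn.k.dynlds = external addrspace(3) global [0 x i8], align A
//
// is created, where A is the maximum alignment of any dynamic LDS variable
// reachable from @k. Non-kernel uses of dynamic LDS variables then load the
// address from llvm.amdgcn.dynlds.offset.table indexed by the kernel id.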
//
//
// Implementation notes / limitations
// A single LDS global variable represents an instance per kernel that can
// reach it. This pass essentially specialises those variables per kernel.
// Handling ConstantExpr during the pass complicated this significantly so now
// all ConstantExpr uses of LDS variables are expanded to instructions. This
// may need amending when implementing non-undef initialisers.
//
// Lowering is split between this IR pass and the back end. This pass chooses
// where given variables should be allocated and marks them with metadata,
// MD_absolute_symbol. The backend places the variables in coincidentally the
// same location and raises a fatal error if something has gone awry. This works
// in practice because the only pass between this one and the backend that
// changes LDS is PromoteAlloca and the changes it makes do not conflict.
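//
// For example, a variable assigned the fixed address 8 carries metadata
// describing the half open range [8, 9):
//
//   @x = internal addrspace(3) global i32 undef, align 4, !absolute_symbol !0
//   !0 = !{i32 8, i32 9}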
//
// Addresses are written to constant global arrays based on the same metadata.
//
// The backend lowers LDS variables in the order of traversal of the function.
// This is at odds with the deterministic layout required. The workaround is to
// allocate the fixed-address variables immediately upon starting the function
// where they can be placed as intended. This requires a means of mapping from
// the function to the variables that it allocates. For the module scope lds,
// this is via metadata indicating whether the variable is not required. If a
// pass deletes that metadata, a fatal error on disagreement with the absolute
// symbol metadata will occur. For kernel scope and dynamic, this is by _name_
// correspondence between the function and the variable. It requires the
// kernel to have a name (which is only a limitation for tests in practice) and
// for nothing to rename the corresponding symbols. This is a hazard if the pass
// is run multiple times during debugging. Alternative schemes considered all
// involve bespoke metadata.
//
// If the name correspondence can be replaced, multiple distinct kernels that
// have the same memory layout can map to the same kernel id (as the address
// itself is handled by the absolute symbol metadata) and that will allow more
// uses of the "kernel" style faster lowering and reduce the size of the lookup
// tables.
//
// There is a test that checks this does not fire for a graphics shader. This
// lowering is expected to work for graphics if the isKernel test is changed.
//
// The current markUsedByKernel is sufficient for PromoteAlloca but is elided
// before codegen. Replacing this with an equivalent intrinsic which lasts until
// shortly after the machine function lowering of LDS would help break the name
// mapping. The other part needed is probably to amend PromoteAlloca to embed
// the LDS variables it creates in the same struct created here. That avoids the
// current hazard where a PromoteAlloca LDS variable might be allocated before
// the kernel scope (and thus error on the address check). Given a new invariant
// that no LDS variables exist outside of the structs managed here, and an
// intrinsic that lasts until after the LDS frame lowering, it should be
// possible to drop the name mapping and fold equivalent memory layouts.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

#include <cstdio>
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

namespace {

cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

enum class LoweringKind { module, table, kernel, hybrid };
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));

bool isKernelLDS(const Function *F) {
  // Some weirdness here. AMDGPU::isKernelCC does not call into
  // AMDGPU::isKernel with the calling conv, it instead calls into
  // isModuleEntryFunction which returns true for more calling conventions
  // than AMDGPU::isKernel does. There's a FIXME on AMDGPU::isKernel.
  // There's also a test that checks that the LDS lowering does not hit on
  // a graphics shader, denoted amdgpu_ps, so stay with the limited case.
  // Putting LDS in the name of the function to draw attention to this.
  return AMDGPU::isKernel(F->getCallingConv());
}

template <typename T> std::vector<T> sortByName(std::vector<T> &&V) {
  llvm::sort(V.begin(), V.end(), [](const auto *L, const auto *R) {
    return L->getName() < R->getName();
  });
  return {std::move(V)};
}

class AMDGPULowerModuleLDS {
  const AMDGPUTargetMachine &TM;

  static void
  removeLocalVarsFromUsedLists(Module &M,
                               const DenseSet<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));

    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });

    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }

  static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    // This intrinsic is eliminated shortly before instruction selection. It
    // does not suffice to indicate to ISel that a given global which is not
    // immediately used by the kernel must still be allocated by it. An
    // equivalent target specific intrinsic which lasts until immediately after
    // codegen would suffice for that, but one would still need to ensure that
    // the variables are allocated in the anticipated order.
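    //
    // The marker emitted below is, as a sketch:
    //   call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) %gep) ]
    // where %gep is a zero index GEP to the struct instance SGV.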
    BasicBlock *Entry = &Func->getEntryBlock();
    IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {
        Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};

    Builder.CreateCall(
        Decl, {}, {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
  }

  static bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M) {
    // Constants are uniqued within LLVM. A ConstantExpr referring to an LDS
    // global may have uses from multiple different functions as a result.
    // This pass specialises LDS variables with respect to the kernel that
    // allocates them.

    // This is semantically equivalent to (unimplemented because it is slow):
    // for (auto &F : M.functions())
    //   for (auto &BB : F)
    //     for (auto &I : BB)
    //       for (Use &Op : I.operands())
    //         if (constantExprUsesLDS(Op))
    //           replaceConstantExprInFunction(I, Op);

    SmallVector<Constant *> LDSGlobals;
    for (auto &GV : M.globals())
      if (AMDGPU::isLDSVariableToLower(GV))
        LDSGlobals.push_back(&GV);

    return convertUsersOfConstantsToInstructions(LDSGlobals);
  }

public:
  AMDGPULowerModuleLDS(const AMDGPUTargetMachine &TM_) : TM(TM_) {}

  using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;

  using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;

  static void getUsesOfLDSByFunction(CallGraph const &CG, Module &M,
                                     FunctionVariableMap &kernels,
                                     FunctionVariableMap &functions) {

    // Get uses from the current function, excluding uses by called functions
    // Two output variables to avoid walking the globals list twice
    for (auto &GV : M.globals()) {
      if (!AMDGPU::isLDSVariableToLower(GV)) {
        continue;
      }

      for (User *V : GV.users()) {
        if (auto *I = dyn_cast<Instruction>(V)) {
          Function *F = I->getFunction();
          if (isKernelLDS(F)) {
            kernels[F].insert(&GV);
          } else {
            functions[F].insert(&GV);
          }
        }
      }
    }
  }

  struct LDSUsesInfoTy {
    FunctionVariableMap direct_access;
    FunctionVariableMap indirect_access;
  };

  static LDSUsesInfoTy getTransitiveUsesOfLDS(CallGraph const &CG, Module &M) {

    FunctionVariableMap direct_map_kernel;
    FunctionVariableMap direct_map_function;
    getUsesOfLDSByFunction(CG, M, direct_map_kernel, direct_map_function);

    // Collect variables that are used by functions whose address has escaped
    DenseSet<GlobalVariable *> VariablesReachableThroughFunctionPointer;
    for (Function &F : M.functions()) {
      if (!isKernelLDS(&F))
        if (F.hasAddressTaken(nullptr,
                              /* IgnoreCallbackUses */ false,
                              /* IgnoreAssumeLikeCalls */ false,
                              /* IgnoreLLVMUsed */ true,
                              /* IgnoreArcAttachedCall */ false)) {
          set_union(VariablesReachableThroughFunctionPointer,
                    direct_map_function[&F]);
        }
    }

    auto functionMakesUnknownCall = [&](const Function *F) -> bool {
      assert(!F->isDeclaration());
      for (const CallGraphNode::CallRecord &R : *CG[F]) {
        if (!R.second->getFunction()) {
          return true;
        }
      }
      return false;
    };

    // Work out which variables are reachable through function calls
    FunctionVariableMap transitive_map_function = direct_map_function;

    // If the function makes any unknown call, assume the worst case that it can
    // access all variables accessed by functions whose address escaped
    for (Function &F : M.functions()) {
      if (!F.isDeclaration() && functionMakesUnknownCall(&F)) {
        if (!isKernelLDS(&F)) {
          set_union(transitive_map_function[&F],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    // Direct implementation of collecting all variables reachable from each
    // function
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || isKernelLDS(&Func))
        continue;

      DenseSet<Function *> seen; // catches cycles
      SmallVector<Function *, 4> wip{&Func};

      while (!wip.empty()) {
        Function *F = wip.pop_back_val();

        // Can accelerate this by referring to transitive map for functions that
        // have already been computed, with more care than this
        set_union(transitive_map_function[&Func], direct_map_function[F]);

        for (const CallGraphNode::CallRecord &R : *CG[F]) {
          Function *ith = R.second->getFunction();
          if (ith) {
            if (!seen.contains(ith)) {
              seen.insert(ith);
              wip.push_back(ith);
            }
          }
        }
      }
    }

    // direct_map_kernel lists which variables are used by the kernel
    // find the variables which are used through a function call
    FunctionVariableMap indirect_map_kernel;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      for (const CallGraphNode::CallRecord &R : *CG[&Func]) {
        Function *ith = R.second->getFunction();
        if (ith) {
          set_union(indirect_map_kernel[&Func], transitive_map_function[ith]);
        } else {
          set_union(indirect_map_kernel[&Func],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    // Verify that we fall into one of 2 cases:
    //    - All variables are absolute: this is a re-run of the pass
    //      so we don't have anything to do.
    //    - No variables are absolute.
    std::optional<bool> HasAbsoluteGVs;
    for (auto &Map : {direct_map_kernel, indirect_map_kernel}) {
      for (auto &[Fn, GVs] : Map) {
        for (auto *GV : GVs) {
          bool IsAbsolute = GV->isAbsoluteSymbolRef();
          if (HasAbsoluteGVs.has_value()) {
            if (*HasAbsoluteGVs != IsAbsolute) {
              report_fatal_error(
                  "Module cannot mix absolute and non-absolute LDS GVs");
            }
          } else
            HasAbsoluteGVs = IsAbsolute;
        }
      }
    }

    // If we only had absolute GVs, we have nothing to do, return an empty
    // result.
    if (HasAbsoluteGVs && *HasAbsoluteGVs)
      return {FunctionVariableMap(), FunctionVariableMap()};

    return {std::move(direct_map_kernel), std::move(indirect_map_kernel)};
  }

  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // Remap from an LDS global to the ConstantExpr GEP into the struct where it
  // has been moved, once per kernel, and build an array with an element for
  // each kernel containing where the corresponding variable was remapped to.

  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    // Create a ConstantArray containing the address of each Variable within the
    // kernel corresponding to LDSVarsToConstantGEP, or poison if that kernel
    // does not allocate it.
    // TODO: Drop the ptrtoint conversion
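    //
    // For three variables where the kernel allocates the first two, the
    // result is, schematically:
    //   [3 x i32] [i32 ptrtoint (...), i32 ptrtoint (...), i32 poison]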

    Type *I32 = Type::getInt32Ty(Ctx);

    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());

    SmallVector<Constant *> Elements;
    for (size_t i = 0; i < Variables.size(); i++) {
      GlobalVariable *GV = Variables[i];
      auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
      if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
        auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
        Elements.push_back(elt);
      } else {
        Elements.push_back(PoisonValue::get(I32));
      }
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }

  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);

    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    Constant *Missing = PoisonValue::get(KernelOffsetsType);
    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      auto Replacement = KernelToReplacement.find(kernels[i]);
      overallConstantExprElts[i] =
          (Replacement == KernelToReplacement.end())
              ? Missing
              : getAddressesOfVariablesInKernel(
                    Ctx, Variables, Replacement->second.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);

    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }

  void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                 GlobalVariable *LookupTable,
                                 GlobalVariable *GV, Use &U,
                                 Value *OptionalIndex) {
    // Table is a constant array of the same length as OrderedKernels
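    // Emits, schematically, a GEP of LookupTable[0][kernel_id] (with
    // OptionalIndex appended when supplied, as for the static variable
    // table), an i32 load of the stored address, and an inttoptr back to
    // the LDS pointer type.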
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {
        ConstantInt::get(I32, 0),
        tableKernelIndex,
    };
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());

    Value *loaded = Builder.CreateLoad(I32, Address);

    Value *replacement =
        Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());

    U.set(replacement);
  }

  void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {

    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];

      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;

        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }

  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {

    DenseSet<Function *> KernelSet;

    if (VariableSet.empty())
      return KernelSet;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }

    return KernelSet;
  }

  static GlobalVariable *
  chooseBestVariableForModuleStrategy(const DataLayout &DL,
                                      VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels

    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;

      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes module scope variable less attractive
        if (UserCount < Other.UserCount) {
          return true;
        }
        if (UserCount > Other.UserCount) {
          return false;
        }

        // Bigger makes module scope variable less attractive
        if (Size < Other.Size) {
          return false;
        }

        if (Size > Other.Size) {
          return true;
        }

        // Arbitrary but consistent
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;

    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // A variable reachable by only one kernel is best lowered with kernel
        // strategy
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }

    return MostUsed.GV;
  }

  static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
                                       uint32_t Address) {
    // Write the specified address into metadata where it can be retrieved by
    // the assembler. Format is a half open range, [Address, Address+1)
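    // e.g. recording address 16 attaches !absolute_symbol !{i32 16, i32 17}
    // to the variable.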
    LLVMContext &Ctx = M->getContext();
    auto *IntTy =
        M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(Ctx, {MinC, MaxC}));
  }

  DenseMap<Function *, Value *> tableKernelIndexCache;
  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
    // lowers to a read from a live in register. Emit it once in the entry
    // block to spare deduplicating it later.
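    // The cached value is, schematically:
    //   %id = call i32 @llvm.amdgcn.lds.kernel.id()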
    auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
    if (Inserted) {
      Function *Decl =
          Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});

      auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      IRBuilder<> Builder(&*InsertAt);

      It->second = Builder.CreateCall(Decl, {});
    }

    return It->second;
  }

  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    // Associate kernels in the set with an arbitrary but reproducible order and
    // annotate them with that order in metadata. This metadata is recognised by
    // the backend and lowered to a SGPR which can be read from using
    // amdgcn_lds_kernel_id.

    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {

      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;

        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          assert(Func.hasName()); // else fatal error earlier
          OrderedKernels.push_back(&Func);
        }
      }

      // Put them in an arbitrary but reproducible order
      OrderedKernels = sortByName(std::move(OrderedKernels));

      // Annotate the kernels with their order in this vector
      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i)),
        };
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }

  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {

    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration of this loop assigns exactly one global variable to
      // exactly one of the implementation strategies.

      GlobalVariable *GV = K.first;
      assert(AMDGPU::isLDSVariableToLower(*GV));
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;

      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;

      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;

      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    // All LDS variables accessed indirectly have now been partitioned into
    // the distinct lowering strategies.
    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }

  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    // Create a struct to hold the ModuleScopeVariables. Replace all uses of
    // those variables from non-kernel functions with the new struct instance.
    // Replace only the uses from kernel functions that will allocate this
    // instance. That is a space optimisation - kernels that use a subset of
    // the module scope struct and do not need to allocate it for indirect
    // calls will only allocate the subset they use (they do so as part of
    // the per-kernel lowering).
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }

    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                                ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                    cast<Constant>(ModuleScopeReplacement.SGV),
                                    PointerType::getUnqual(Ctx)))});

    // module.lds will be allocated at zero in any kernel that allocates it
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    // historic
    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Replace all uses of module scope variable from non-kernel functions
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          if (!I) {
            return false;
          }
          Function *F = I->getFunction();
          return !isKernelLDS(F);
        });

    // Replace uses of module scope variable from kernel functions that
    // allocate the module scope variable, otherwise leave them unchanged
    // Record on each kernel whether the module scope global is used by it

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              if (!I) {
                return false;
              }
              Function *F = I->getFunction();
              return F == &Func;
            });

        markUsedByKernel(&Func, ModuleScopeReplacement.SGV);
      }
    }

    return ModuleScopeReplacement.SGV;
  }

  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {

    // Create a struct for each kernel for the non-module-scope variables.

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      // Allocate variables that are used directly in this struct to get
      // alignment aware allocation and a predictable frame size.
      for (auto &v : LDSUsesInfo.direct_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Allocate variables that are accessed indirectly so that a lookup of
      // this struct instance can find them from nested functions.
      for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Variables allocated in module lds must all resolve to that struct,
      // not to the per-kernel instance.
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        for (GlobalVariable *v : ModuleScopeVariables) {
          KernelUsedVariables.erase(v);
        }
      }

      if (KernelUsedVariables.empty()) {
        // Either used no LDS, or the LDS it used was all in the module struct
        // or dynamically sized
        continue;
      }

      // The association between kernel function and LDS struct is done by
      // symbol name, which only works if the function in question has a
      // name. This is not expected to be a problem in practice as kernels
      // are called by name making anonymous ones (which are named by the
      // backend) difficult to use. This does mean that llvm test cases need
      // to name the kernels.
      if (!Func.hasName()) {
        report_fatal_error("Anonymous kernels cannot use LDS variables");
      }

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // If any indirect uses, create a direct use to ensure allocation
      // TODO: Simpler to unconditionally mark used but that regresses
      // codegen in test/CodeGen/AMDGPU/noclobber-barrier.ll
      auto Accesses = LDSUsesInfo.indirect_access.find(&Func);
      if ((Accesses != LDSUsesInfo.indirect_access.end()) &&
          !Accesses->second.empty())
        markUsedByKernel(&Func, Replacement.SGV);

      // remove preserves existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      // Rewrite uses within kernel to the new struct
      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }

  static GlobalVariable *
  buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
                                        Function *func) {
    // Create a dynamic lds variable with a name associated with the passed
    // function and with the maximum alignment of any dynamic lds variable
    // reachable from this kernel. Dynamic LDS is allocated after the static
    // LDS allocation, possibly after alignment padding. All dynamic LDS
    // variables are allocated at the same address in each kernel in order to
    // provide the documented aliasing semantics. Setting the alignment here
    // allows this IR pass to accurately predict the exact constant at which
    // it will be allocated.

    assert(isKernelLDS(func));

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);

    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV)) {
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
      }
    };

    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
      UpdateMaxAlignment(GV);
    }

    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
      UpdateMaxAlignment(GV);
    }

    assert(func->hasName()); // Checked by caller
    auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr,
        GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);

    assert(AMDGPU::isDynamicLDS(*N));
    return N;
  }

  /// Strip "amdgpu-no-lds-kernel-id" from any functions where we may have
  /// introduced its use. If AMDGPUAttributor ran prior to the pass, we inferred
  /// the lack of llvm.amdgcn.lds.kernel.id calls.
  void removeNoLdsKernelIdFromReachable(CallGraph &CG, Function *KernelRoot) {
    KernelRoot->removeFnAttr("amdgpu-no-lds-kernel-id");

    SmallVector<Function *> Tmp({CG[KernelRoot]->getFunction()});
    if (!Tmp.back())
      return;

    SmallPtrSet<Function *, 8> Visited;
    bool SeenUnknownCall = false;

    do {
      Function *F = Tmp.pop_back_val();

      for (auto &N : *CG[F]) {
        if (!N.second)
          continue;

        Function *Callee = N.second->getFunction();
        if (!Callee) {
          if (!SeenUnknownCall) {
            SeenUnknownCall = true;

            // If we see any indirect calls, assume nothing about potential
            // targets.
            // TODO: This could be refined to possible LDS global users.
            for (auto &N : *CG.getExternalCallingNode()) {
              Function *PotentialCallee = N.second->getFunction();
              if (!isKernelLDS(PotentialCallee))
                PotentialCallee->removeFnAttr("amdgpu-no-lds-kernel-id");
            }
          }
          // A null callee has no attribute to strip; skip it rather than
          // fall through and dereference it below.
          continue;
        }

        Callee->removeFnAttr("amdgpu-no-lds-kernel-id");
        if (Visited.insert(Callee).second)
          Tmp.push_back(Callee);
      }
    } while (!Tmp.empty());
  }

  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      std::vector<Constant *> newDynamicLDS;

      // Table is built in the same order as OrderedKernels
      for (auto &func : OrderedKernels) {

        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          assert(isKernelLDS(func));
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }

          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);

          KernelToCreatedDynamicLDS[func] = N;

          markUsedByKernel(func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      for (GlobalVariable *GV : DynamicVariables) {
        for (Use &U : make_early_inc_range(GV->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;

          replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }

  bool runOnModule(Module &M) {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    Changed = true; // todo: narrow this down

    // For each kernel, what variables does it access directly or through
    // callees
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

    // For each variable accessed through callees, which kernels access it
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second) {
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
      }
    }

    // Partition variables accessed indirectly into the different strategies
    DenseSet<GlobalVariable *> ModuleScopeVariables;
    DenseSet<GlobalVariable *> TableLookupVariables;
    DenseSet<GlobalVariable *> KernelAccessVariables;
    DenseSet<GlobalVariable *> DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // If a kernel accesses a variable that is going to be stored in the
    // module instance through a call, then that kernel needs to allocate the
    // module instance.
    const DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    const DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);

    const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);
1203 
1204     DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
1205         lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
1206                                         KernelsThatAllocateModuleLDS,
1207                                         MaybeModuleScopeStruct);
1208 
1209     // Lower zero cost accesses to the kernel instances just created
1210     for (auto &GV : KernelAccessVariables) {
1211       auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
1212       assert(funcs.size() == 1); // Only one kernel can access it
1213       LDSVariableReplacement Replacement =
1214           KernelToReplacement[*(funcs.begin())];
1215 
1216       DenseSet<GlobalVariable *> Vec;
1217       Vec.insert(GV);
1218 
1219       replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
1220         return isa<Instruction>(U.getUser());
1221       });
1222     }
1223 
1224     // The ith element of this vector is kernel id i
1225     std::vector<Function *> OrderedKernels =
1226         assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
1227                                       KernelsThatIndirectlyAllocateDynamicLDS);
1228 
1229     if (!KernelsThatAllocateTableLDS.empty()) {
1230       LLVMContext &Ctx = M.getContext();
1231       IRBuilder<> Builder(Ctx);
1232 
1233       // The order must be consistent between lookup table and accesses to
1234       // lookup table
1235       auto TableLookupVariablesOrdered =
1236           sortByName(std::vector<GlobalVariable *>(TableLookupVariables.begin(),
1237                                                    TableLookupVariables.end()));
1238 
      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);

      // Strip amdgpu-no-lds-kernel-id from all functions reachable from the
      // kernel. We may have inferred this wasn't used prior to the pass.
      //
      // TODO: We could filter out subgraphs that do not access LDS globals.
      for (Function *F : KernelsThatAllocateTableLDS)
        removeNoLdsKernelIdFromReachable(CG, F);
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);

    // All kernel frames have been allocated. Calculate and record the
    // addresses.
    {
      const DataLayout &DL = M.getDataLayout();

      for (Function &Func : M.functions()) {
        if (Func.isDeclaration() || !isKernelLDS(&Func))
          continue;

        // All three of these are optional. The first variable is allocated at
        // zero. They are allocated by AMDGPUMachineFunction as one block.
        // Layout:
        //{
        //  module.lds
        //  alignment padding
        //  kernel instance
        //  alignment padding
        //  dynamic lds variables
        //}
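        // For example (sizes illustrative only): a 12 byte module.lds struct
        // at offset 0 followed by a kernel struct with alignment 8 needs
        // 4 bytes of padding, placing the kernel struct at offset 16.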

        const bool AllocateModuleScopeStruct =
            MaybeModuleScopeStruct &&
            KernelsThatAllocateModuleLDS.contains(&Func);

        auto Replacement = KernelToReplacement.find(&Func);
        const bool AllocateKernelScopeStruct =
            Replacement != KernelToReplacement.end();

        const bool AllocateDynamicVariable =
            KernelToCreatedDynamicLDS.contains(&Func);

        uint32_t Offset = 0;

        if (AllocateModuleScopeStruct) {
          // Allocated at zero, recorded once on construction, not once per
          // kernel
          Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
        }

        if (AllocateKernelScopeStruct) {
          GlobalVariable *KernelStruct = Replacement->second.SGV;
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
          recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
          Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
        }

        // If there is dynamic allocation, the alignment needed is included in
        // the static frame size. There may be no reference to the dynamic
        // variable in the kernel itself, so without including it here, that
        // alignment padding could be missed.
        if (AllocateDynamicVariable) {
          GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
          recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
        }

        if (Offset != 0) {
          (void)TM; // TODO: Account for target maximum LDS
          std::string Buffer;
          raw_string_ostream SS{Buffer};
          SS << format("%u", Offset);

          // Instead of explicitly marking kernels that access dynamic
          // variables using special case metadata, annotate with min-lds ==
          // max-lds, i.e. that there is no more space available for
          // allocating more static LDS variables. That is the right condition
          // to prevent allocating more variables which would collide with the
          // addresses assigned to dynamic variables.
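          // For example, a kernel whose static frame occupies 1024 bytes is
          // annotated "1024", or "1024,1024" if it also accesses dynamic LDS.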
          if (AllocateDynamicVariable)
            SS << format(",%u", Offset);

          Func.addFnAttr("amdgpu-lds-size", Buffer);
        }
      }
    }

    for (auto &GV : make_early_inc_range(M.globals()))
      if (AMDGPU::isLDSVariableToLower(GV)) {
        // Probably want to remove these from the llvm.used lists as well
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

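      // Round the alignment up to the widest load/store plausibly usable for
      // a variable of this size; e.g. a 6 byte variable is raised to align 8
      // so a b64 access remains possible.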
      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and a map from
    // those variables to the corresponding constantexpr GEPs.
    // Variables may be introduced to meet alignment requirements. No aliasing
    // metadata is useful for these as they have no uses. Erased before return.

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument, which varies when changing how they
      // are identified, leading to spurious test breakage.
      auto Sorted = sortByName(std::vector<GlobalVariable *>(
          LDSVarsToTransform.begin(), LDSVarsToTransform.end()));

      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);
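    // LayoutFields is now reordered to reduce the padding needed between
    // fields; the loop below walks that order and materialises the remaining
    // padding as explicit byte-array variables.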

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet the requested alignment
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0
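          //      e.g. CurrentOffset == 6 and DataAlignV == 4 gives Rem == 2,
          //      so Padding == 2 and the next field lands at offset 8.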

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage,
              PoisonValue::get(ATy), "", nullptr, GlobalValue::NotThreadLocal,
              AMDGPUAS::LOCAL_ADDRESS, false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, PoisonValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
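    // Each surviving variable is mapped to an inbounds constantexpr GEP
    // addressing its field, i.e. roughly (struct name illustrative):
    //   getelementptr inbounds (%VarName.t, ptr addrspace(3) @VarName,
    //                           i32 0, i32 I)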
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }

  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      const LDSVariableReplacement &Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack... we need to insert the aliasing info in a predictable order
    // for lit tests. It would be better to have them in a stable order
    // already, ideally the same order they get allocated, which might mean
    // an ordered set container.
    auto LDSVarsToTransform = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end()));

    // Create alias.scope metadata and the corresponding lists. Each field in
    // the new structure does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the ith variable with a constantexpr GEP to the
    // corresponding field of the instance that will be allocated by
    // AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP.at(GV);

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

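      // Maintain the invariant that NoAliasList holds every scope except
      // AliasScopes[I]: slot I-1 held AliasScopes[I] until now, so writing
      // AliasScopes[I-1] into it simultaneously removes the current scope and
      // restores the previous one.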
      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }

  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
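    // Walk the users of Ptr, attaching the alias metadata to memory
    // instructions and raising their alignment where the known offset from
    // the struct base allows it. MaxDepth bounds the recursion through GEP
    // and cast chains. Align(1) with no alias scope carries no information,
    // so stop early in that case.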
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // No atomicrmw operation works on pointers today, but check anyway in
        // case that changes or we end up processing a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
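          // GA stays at the default Align(1), carrying no information, when
          // the GEP offset is not a compile-time constant.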
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

class AMDGPULowerModuleLDSLegacy : public ModulePass {
public:
  const AMDGPUTargetMachine *TM;
  static char ID;

  AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine *TM_ = nullptr)
      : ModulePass(ID), TM(TM_) {
    initializeAMDGPULowerModuleLDSLegacyPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (!TM)
      AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    if (!TM) {
      auto &TPC = getAnalysis<TargetPassConfig>();
      TM = &TPC.getTM<AMDGPUTargetMachine>();
    }

    return AMDGPULowerModuleLDS(*TM).runOnModule(M);
  }
};

} // namespace
char AMDGPULowerModuleLDSLegacy::ID = 0;

char &llvm::AMDGPULowerModuleLDSLegacyPassID = AMDGPULowerModuleLDSLegacy::ID;

INITIALIZE_PASS_BEGIN(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                      "Lower uses of LDS variables from non-kernel functions",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                    "Lower uses of LDS variables from non-kernel functions",
                    false, false)

ModulePass *
llvm::createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPULowerModuleLDSLegacy(TM);
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS(TM).runOnModule(M) ? PreservedAnalyses::none()
                                                 : PreservedAnalyses::all();
}