//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates local data store (LDS) uses from non-kernel functions.
// LDS is contiguous memory allocated per kernel execution.
//
// Background.
//
// The programming model is global variables, or equivalently function local
// static variables, accessible from kernels or other functions. For uses from
// kernels this is straightforward - assign an integer to the kernel for the
// memory required by all the variables combined, allocate them within that.
// For uses from functions there are performance tradeoffs to choose between.
//
// This model means the GPU runtime can specify the amount of memory allocated.
// If this is more than the kernel assumed, the excess can be made available
// using a language specific feature, which IR represents as a variable with
// no initializer. This feature is referred to here as "Dynamic LDS" and is
// lowered slightly differently to the normal case.
//
// Consequences of this GPU feature:
// - memory is limited and exceeding it halts compilation
// - a global accessed by one kernel exists independent of other kernels
// - a global exists independent of simultaneous execution of the same kernel
// - the address of the global may be different from different kernels as they
//   do not alias, which permits only allocating variables they use
// - if the address is allowed to differ, functions need help to find it
//
// Uses from kernels are implemented here by grouping them in a per-kernel
// struct instance. This duplicates the variables, accurately modelling their
// aliasing properties relative to a single global representation. It also
// permits control over alignment via padding.
//
// Uses from functions are more complicated and the primary purpose of this
// IR pass. Several different lowerings are chosen between to meet requirements
// to avoid allocating any LDS where it is not necessary, as that impacts
// occupancy and may fail the compilation, while not imposing overhead on a
// feature whose primary advantage over global memory is performance. The basic
// design goal is to avoid one kernel imposing overhead on another.
//
// Implementation.
//
// LDS variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
// Non-undef initializers are not yet implemented for LDS.
//
// LDS variables that are always allocated at the same address can be found
// by lookup at that address. Otherwise runtime information/cost is required.
//
// The simplest strategy possible is to group all LDS variables in a single
// struct and allocate that struct in every kernel such that the original
// variables are always at the same address. LDS is however a limited resource
// so this strategy is unusable in practice. It is not implemented here.
//
// Strategy | Precise allocation | Zero runtime cost | General purpose |
//  --------+--------------------+-------------------+-----------------+
//   Module |                 No |               Yes |             Yes |
//    Table |                Yes |                No |             Yes |
//   Kernel |                Yes |               Yes |              No |
//   Hybrid |                Yes |           Partial |             Yes |
//
// "Module" spends LDS memory to save cycles. "Table" spends cycles and global
// memory to save LDS. "Kernel" is as fast as kernel allocation but only works
// for variables that are known reachable from a single kernel. "Hybrid" picks
// between all three. When forced to choose between LDS and cycles we minimise
// LDS use.
//
// The "module" lowering implemented here finds LDS variables which are used by
// non-kernel functions and creates a new struct with a field for each of those
// LDS variables. Variables that are only used from kernels are excluded.
// Kernels that do not use this struct are annotated with the attribute
// amdgpu-elide-module-lds which allows the back end to elide the allocation.
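//
// As a hedged illustration (types, field order and alignment are approximate,
// not the exact output of this pass), two such variables might be grouped as:
//   %llvm.amdgcn.module.lds.t = type { float, i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 8
// with uses rewritten to constant GEPs into that single struct instance.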
//
// The "table" lowering implemented here has three components.
// First, each kernel is assigned a unique integer identifier which is
// available in functions it calls through the intrinsic amdgcn_lds_kernel_id.
// The integer is passed through a specific SGPR, thus works with indirect
// calls.
// Second, each kernel allocates LDS variables independent of other kernels and
// writes the addresses it chose for each variable into an array in consistent
// order. If the kernel does not allocate a given variable, it writes undef to
// the corresponding array location. These arrays are written to a constant
// table in the order matching the kernel unique integer identifier.
// Third, uses from non-kernel functions are replaced with a table lookup using
// the intrinsic function to find the address of the variable.
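//
// An access from a non-kernel function is then rewritten to approximately the
// following IR (an illustrative sketch; exact names, types and indices
// differ):
//   %id = call i32 @llvm.amdgcn.lds.kernel.id()
//   %gep = getelementptr inbounds [N x [M x i32]], ptr addrspace(4)
//       @llvm.amdgcn.lds.offset.table, i32 0, i32 %id, i32 <varindex>
//   %addr = load i32, ptr addrspace(4) %gep
//   %ptr = inttoptr i32 %addr to ptr addrspace(3)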
//
// "Kernel" lowering is only applicable for variables that are unambiguously
// reachable from exactly one kernel. For those cases, accesses to the variable
// can be lowered to a ConstantExpr address of a struct instance specific to
// that one kernel. This is zero cost in space and in compute. It will raise a
// fatal error on any variable that might be reachable from multiple kernels
// and is thus most easily used as part of the hybrid lowering strategy.
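//
// For example, with a hypothetical kernel k0, a use may fold to a constant
// expression of roughly this shape (illustrative):
//   getelementptr inbounds (%llvm.amdgcn.kernel.k0.lds.t, ptr addrspace(3)
//       @llvm.amdgcn.kernel.k0.lds, i32 0, i32 1)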
//
// Hybrid lowering is a mixture of the above. It uses the zero cost kernel
// lowering where it can. It lowers the variable accessed by the greatest
// number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
// without incurring LDS memory overhead are. The remaining ones are lowered
// via table.
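//
// As a worked example (hypothetical module): if @a is reachable from kernels
// k0 and k1, @b only from k0, and @c from k0 and k2, hybrid places @a at
// module scope (the most used variable, and the first one is free), lowers @b
// with the kernel strategy, and lowers @c via the table because k2 does not
// allocate the module struct.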
//
// Consequences
// - No heuristics or user-controlled magic numbers, hybrid is the right choice
// - Kernels that don't use functions (or have had them all inlined) are not
//   affected by any lowering for kernels that do.
// - Kernels that don't make indirect function calls are not affected by those
//   that do.
// - Variables which are used by lots of kernels, e.g. those injected by a
//   language runtime in most kernels, are expected to have no overhead
// - Implementations that instantiate templates per-kernel where those templates
//   use LDS are expected to hit the "Kernel" lowering strategy
// - The runtime properties impose a cost in compiler implementation complexity
//
// Dynamic LDS implementation
// Dynamic LDS is lowered similarly to the "table" strategy above and uses the
// same intrinsic to identify which kernel is at the root of the dynamic call
// graph. This relies on the specified behaviour that all dynamic LDS variables
// alias one another, i.e. are at the same address, with respect to a given
// kernel. Therefore this pass creates new dynamic LDS variables for each kernel
// that allocates any dynamic LDS and builds a table of addresses out of those.
// The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
// The corresponding optimisation for "kernel" lowering where the table lookup
// is elided is not implemented.
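//
// The replacement has approximately this shape (illustrative; k0 is a
// hypothetical kernel and A its maximum reachable dynamic LDS alignment):
//   @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], align A
// together with a constant table, llvm.amdgcn.dynlds.offset.table, indexed by
// kernel id and holding the ptrtoint address of each such per-kernel variable.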
//
//
// Implementation notes / limitations
// A single LDS global variable represents an instance per kernel that can
// reach said variable. This pass essentially specialises those variables per
// kernel. Handling ConstantExpr during the pass complicated this significantly
// so now all ConstantExpr uses of LDS variables are expanded to instructions.
// This may need amending when implementing non-undef initialisers.
//
// Lowering is split between this IR pass and the back end. This pass chooses
// where given variables should be allocated and marks them with metadata,
// MD_absolute_symbol. The backend places the variables in coincidentally the
// same location and raises a fatal error if something has gone awry. This works
// in practice because the only pass between this one and the backend that
// changes LDS is PromoteAlloca and the changes it makes do not conflict.
//
// Addresses are written to constant global arrays based on the same metadata.
//
// The backend lowers LDS variables in the order of traversal of the function.
// This is at odds with the deterministic layout required. The workaround is to
// allocate the fixed-address variables immediately upon starting the function
// where they can be placed as intended. This requires a means of mapping from
// the function to the variables that it allocates. For the module scope lds,
// this is via the amdgpu-elide-module-lds attribute indicating that the
// variable is not required. If a pass deletes that attribute, a fatal error on
// disagreement with the absolute symbol metadata will occur. For kernel scope
// and dynamic, this is by _name_ correspondence between the function and the
// variable. It requires the kernel to have a name (which is only a limitation
// for tests in practice) and for nothing to rename the corresponding symbols.
// This is a hazard if the pass is run multiple times during debugging.
// Alternative schemes considered all involve bespoke metadata.
//
// If the name correspondence can be replaced, multiple distinct kernels that
// have the same memory layout can map to the same kernel id (as the address
// itself is handled by the absolute symbol metadata) and that will allow more
// uses of the "kernel" style faster lowering and reduce the size of the lookup
// tables.
//
// There is a test that checks this does not fire for a graphics shader. This
// lowering is expected to work for graphics if the isKernel test is changed.
//
// The current markUsedByKernel is sufficient for PromoteAlloca but is elided
// before codegen. Replacing this with an equivalent intrinsic which lasts until
// shortly after the machine function lowering of LDS would help break the name
// mapping. The other part needed is probably to amend PromoteAlloca to embed
// the LDS variables it creates in the same struct created here. That avoids the
// current hazard where a PromoteAlloca LDS variable might be allocated before
// the kernel scope (and thus error on the address check). Given a new invariant
// that no LDS variables exist outside of the structs managed here, and an
// intrinsic that lasts until after the LDS frame lowering, it should be
// possible to drop the name mapping and fold equivalent memory layouts.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

#include <tuple>
#include <vector>

#include <cstdio>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

namespace {

cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

enum class LoweringKind { module, table, kernel, hybrid };
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));

bool isKernelLDS(const Function *F) {
  // Some weirdness here. AMDGPU::isKernelCC does not call into
  // AMDGPU::isKernel with the calling conv, it instead calls into
  // isModuleEntryFunction which returns true for more calling conventions
  // than AMDGPU::isKernel does. There's a FIXME on AMDGPU::isKernel.
  // There's also a test that checks that the LDS lowering does not hit on
  // a graphics shader, denoted amdgpu_ps, so stay with the limited case.
  // Putting LDS in the name of the function to draw attention to this.
  return AMDGPU::isKernel(F->getCallingConv());
}

class AMDGPULowerModuleLDS : public ModulePass {

  static void
  removeLocalVarsFromUsedLists(Module &M,
                               const DenseSet<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));

    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });

    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    // This intrinsic is eliminated shortly before instruction selection. It
    // does not suffice to indicate to ISel that a given global which is not
    // immediately used by the kernel must still be allocated by it. An
    // equivalent target specific intrinsic which lasts until immediately after
    // codegen would suffice for that, but one would still need to ensure that
    // the variables are allocated in the anticipated order.

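    // The use emitted below is approximately (an illustrative sketch):
    //   call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3)
    //       @llvm.amdgcn.module.lds) ]
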
    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

  static bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M) {
    // Constants are uniqued within LLVM. A ConstantExpr referring to an LDS
    // global may have uses from multiple different functions as a result.
    // This pass specialises LDS variables with respect to the kernel that
    // allocates them.

    // This is semantically equivalent to the following, which is not
    // implemented that way because it would be slow:
    // for (auto &F : M.functions())
    //   for (auto &BB : F)
    //     for (auto &I : BB)
    //       for (Use &Op : I.operands())
    //         if (constantExprUsesLDS(Op))
    //           replaceConstantExprInFunction(I, Op);
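
    // For example (illustrative), a use such as
    //   store i32 0, ptr addrspace(3) getelementptr inbounds
    //       ([4 x i32], ptr addrspace(3) @lds, i32 0, i32 2)
    // has the constant expression GEP rewritten as an instruction in the
    // using function, giving each function a use that can be specialised.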

    SmallVector<Constant *> LDSGlobals;
    for (auto &GV : M.globals())
      if (AMDGPU::isLDSVariableToLower(GV))
        LDSGlobals.push_back(&GV);

    return convertUsersOfConstantsToInstructions(LDSGlobals);
  }

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;

  using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;

  static void getUsesOfLDSByFunction(CallGraph const &CG, Module &M,
                                     FunctionVariableMap &kernels,
                                     FunctionVariableMap &functions) {

    // Get uses from the current function, excluding uses by called functions
    // Two output variables to avoid walking the globals list twice
    for (auto &GV : M.globals()) {
      if (!AMDGPU::isLDSVariableToLower(GV)) {
        continue;
      }

      for (User *V : GV.users()) {
        if (auto *I = dyn_cast<Instruction>(V)) {
          Function *F = I->getFunction();
          if (isKernelLDS(F)) {
            kernels[F].insert(&GV);
          } else {
            functions[F].insert(&GV);
          }
        }
      }
    }
  }

  struct LDSUsesInfoTy {
    FunctionVariableMap direct_access;
    FunctionVariableMap indirect_access;
  };

  static LDSUsesInfoTy getTransitiveUsesOfLDS(CallGraph const &CG, Module &M) {

    FunctionVariableMap direct_map_kernel;
    FunctionVariableMap direct_map_function;
    getUsesOfLDSByFunction(CG, M, direct_map_kernel, direct_map_function);

    // Collect variables that are used by functions whose address has escaped
    DenseSet<GlobalVariable *> VariablesReachableThroughFunctionPointer;
    for (Function &F : M.functions()) {
      if (!isKernelLDS(&F))
        if (F.hasAddressTaken(nullptr,
                              /* IgnoreCallbackUses */ false,
                              /* IgnoreAssumeLikeCalls */ false,
                              /* IgnoreLLVMUsed */ true,
                              /* IgnoreArcAttachedCall */ false)) {
          set_union(VariablesReachableThroughFunctionPointer,
                    direct_map_function[&F]);
        }
    }

    auto functionMakesUnknownCall = [&](const Function *F) -> bool {
      assert(!F->isDeclaration());
      for (const CallGraphNode::CallRecord &R : *CG[F]) {
        if (!R.second->getFunction()) {
          return true;
        }
      }
      return false;
    };

    // Work out which variables are reachable through function calls
    FunctionVariableMap transitive_map_function = direct_map_function;

    // If the function makes any unknown call, assume the worst case that it can
    // access all variables accessed by functions whose address escaped
    for (Function &F : M.functions()) {
      if (!F.isDeclaration() && functionMakesUnknownCall(&F)) {
        if (!isKernelLDS(&F)) {
          set_union(transitive_map_function[&F],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    // Direct implementation of collecting all variables reachable from each
    // function
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || isKernelLDS(&Func))
        continue;

      DenseSet<Function *> seen; // catches cycles
      SmallVector<Function *, 4> wip{&Func};

      while (!wip.empty()) {
        Function *F = wip.pop_back_val();

        // Can accelerate this by referring to transitive map for functions that
        // have already been computed, with more care than this
        set_union(transitive_map_function[&Func], direct_map_function[F]);

        for (const CallGraphNode::CallRecord &R : *CG[F]) {
          Function *ith = R.second->getFunction();
          if (ith) {
            if (!seen.contains(ith)) {
              seen.insert(ith);
              wip.push_back(ith);
            }
          }
        }
      }
    }

    // direct_map_kernel lists which variables are used by the kernel
    // find the variables which are used through a function call
    FunctionVariableMap indirect_map_kernel;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      for (const CallGraphNode::CallRecord &R : *CG[&Func]) {
        Function *ith = R.second->getFunction();
        if (ith) {
          set_union(indirect_map_kernel[&Func], transitive_map_function[ith]);
        } else {
          set_union(indirect_map_kernel[&Func],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    return {std::move(direct_map_kernel), std::move(indirect_map_kernel)};
  }

  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // Remap from an LDS global to a ConstantExpr GEP giving where it has been
  // moved to for each kernel, and build an array with an element per kernel
  // recording where the corresponding variable was remapped to.
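  //
  // For two variables where a given kernel only allocates the first, that
  // kernel's row is approximately (illustrative):
  //   [i32 ptrtoint (ptr addrspace(3) <gep into its struct> to i32),
  //    i32 poison]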

  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    // Create a ConstantArray containing the address of each Variable within the
    // kernel corresponding to LDSVarsToConstantGEP, or poison if that kernel
    // does not allocate it.
    // TODO: Drop the ptrtoint conversion

    Type *I32 = Type::getInt32Ty(Ctx);

    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());

    SmallVector<Constant *> Elements;
    for (size_t i = 0; i < Variables.size(); i++) {
      GlobalVariable *GV = Variables[i];
      auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
      if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
        auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
        Elements.push_back(elt);
      } else {
        Elements.push_back(PoisonValue::get(I32));
      }
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }

  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);

    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      LDSVariableReplacement Replacement = KernelToReplacement[kernels[i]];
      overallConstantExprElts[i] = getAddressesOfVariablesInKernel(
          Ctx, Variables, Replacement.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);

    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }

  void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                 GlobalVariable *LookupTable,
                                 GlobalVariable *GV, Use &U,
                                 Value *OptionalIndex) {
    // Table is a constant array of the same length as OrderedKernels
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {
        ConstantInt::get(I32, 0),
        tableKernelIndex,
    };
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());

    Value *loaded = Builder.CreateLoad(I32, Address);

    Value *replacement =
        Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());

    U.set(replacement);
  }

  void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {

    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];

      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;

        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }

  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {

    DenseSet<Function *> KernelSet;

    if (VariableSet.empty())
      return KernelSet;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }

    return KernelSet;
  }

  static GlobalVariable *
  chooseBestVariableForModuleStrategy(const DataLayout &DL,
                                      VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels

    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;

      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes module scope variable less attractive
        if (UserCount < Other.UserCount) {
          return true;
        }
        if (UserCount > Other.UserCount) {
          return false;
        }

        // Bigger makes module scope variable less attractive
        if (Size < Other.Size) {
          return false;
        }

        if (Size > Other.Size) {
          return true;
        }

        // Arbitrary but consistent
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;

    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // A variable reachable by only one kernel is best lowered with kernel
        // strategy
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }

    return MostUsed.GV;
  }

  static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
                                       uint32_t Address) {
    // Write the specified address into metadata where it can be retrieved by
    // the assembler. Format is a half open range, [Address, Address+1)
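    //
    // e.g. a variable placed at address 8 is tagged with (illustrative):
    //   @var = ... !absolute_symbol !0
    //   !0 = !{i32 8, i32 9}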
    LLVMContext &Ctx = M->getContext();
    auto *IntTy =
        M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(Ctx, {MinC, MaxC}));
  }

  DenseMap<Function *, Value *> tableKernelIndexCache;
  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
    // lowers to a read from a live in register. Emit it once in the entry
    // block to spare deduplicating it later.
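    // i.e. each such function gains a single (illustrative)
    //   %id = call i32 @llvm.amdgcn.lds.kernel.id()
    // near the top of its entry block, reused by every lookup in the body.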
    if (tableKernelIndexCache.count(F) == 0) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      FunctionType *FTy = FunctionType::get(Type::getInt32Ty(Ctx), {});
      Function *Decl =
          Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});

      BasicBlock::iterator it =
          F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      Instruction &i = *it;
      Builder.SetInsertPoint(&i);

      tableKernelIndexCache[F] = Builder.CreateCall(FTy, Decl, {});
    }

    return tableKernelIndexCache[F];
  }

  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    // Associate kernels in the set with an arbitrary but reproducible order and
    // annotate them with that order in metadata. This metadata is recognised by
    // the backend and lowered to a SGPR which can be read from using
    // amdgcn_lds_kernel_id.
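    //
    // e.g. the kernel that sorts second is tagged with (illustrative):
    //   define amdgpu_kernel void @k1() !llvm.amdgcn.lds.kernel.id !0
    //   !0 = !{i32 1}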

    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {

      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;

        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          assert(Func.hasName()); // else fatal error earlier
          OrderedKernels.push_back(&Func);
        }
      }

      // Put them in an arbitrary but reproducible order
      llvm::sort(OrderedKernels.begin(), OrderedKernels.end(),
                 [](const Function *lhs, const Function *rhs) -> bool {
                   return lhs->getName() < rhs->getName();
                 });

      // Annotate the kernels with their order in this vector
      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i)),
        };
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }

  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {

    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration of this loop assigns exactly one global variable to
      // exactly one of the implementation strategies.

      GlobalVariable *GV = K.first;
      assert(AMDGPU::isLDSVariableToLower(*GV));
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;

      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;

      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;

      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    // All LDS variables accessed indirectly have now been partitioned into
    // the distinct lowering strategies.
    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }

  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    // Create a struct to hold the ModuleScopeVariables.
    // Replace all uses of those variables from non-kernel functions with the
    // new struct instance. Replace only the uses from kernel functions that
    // will allocate this instance. That is a space optimisation - kernels that
    // use a subset of the module scope struct and do not need to allocate it
    // for indirect calls will only allocate the subset they use (they do so as
    // part of the per-kernel lowering).
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }

    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                                ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                    cast<Constant>(ModuleScopeReplacement.SGV),
                                    Type::getInt8PtrTy(Ctx)))});

    // module.lds will be allocated at zero in any kernel that allocates it
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    // historic
    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Replace all uses of module scope variable from non-kernel functions
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          if (!I) {
            return false;
          }
          Function *F = I->getFunction();
          return !isKernelLDS(F);
        });

    // Replace uses of module scope variable from kernel functions that
    // allocate the module scope variable, otherwise leave them unchanged
    // Record on each kernel whether the module scope global is used by it

    IRBuilder<> Builder(Ctx);

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              if (!I) {
                return false;
              }
              Function *F = I->getFunction();
              return F == &Func;
            });

        markUsedByKernel(Builder, &Func, ModuleScopeReplacement.SGV);

      } else {
        markElideModuleLDS(Func);
      }
    }

    return ModuleScopeReplacement.SGV;
  }

  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {

    // Create a struct for each kernel for the non-module-scope variables.

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      // Allocating variables that are used directly in this struct to get
      // alignment-aware allocation and predictable frame size.
      for (auto &v : LDSUsesInfo.direct_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Allocating variables that are accessed indirectly so that a lookup of
      // this struct instance can find them from nested functions.
      for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Variables allocated in module lds must all resolve to that struct,
      // not to the per-kernel instance.
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        for (GlobalVariable *v : ModuleScopeVariables) {
          KernelUsedVariables.erase(v);
        }
      }

      if (KernelUsedVariables.empty()) {
        // Either used no LDS, or the LDS it used was all in the module struct
        // or dynamically sized
        continue;
      }

      // The association between kernel function and LDS struct is done by
      // symbol name, which only works if the function in question has a
      // name. This is not expected to be a problem in practice as kernels
      // are called by name making anonymous ones (which are named by the
      // backend) difficult to use. This does mean that llvm test cases need
      // to name the kernels.
      if (!Func.hasName()) {
        report_fatal_error("Anonymous kernels cannot use LDS variables");
      }

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // Removing from used lists preserves the existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      // Rewrite uses within kernel to the new struct
      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }

  static GlobalVariable *
  buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
                                        Function *func) {
    // Create a dynamic lds variable with a name associated with the passed
    // function and with the maximum alignment of any dynamic lds variable
    // reachable from this kernel. Dynamic LDS is allocated after the static LDS
    // allocation, possibly after alignment padding. All dynamic LDS variables
    // are allocated at the same address in each kernel in order to provide the
    // documented aliasing semantics. Setting the alignment here allows this IR
    // pass to accurately predict the exact constant at which it will be
    // allocated.

    assert(isKernelLDS(func));

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);

    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV)) {
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
      }
    };

    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
      UpdateMaxAlignment(GV);
    }

    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
      UpdateMaxAlignment(GV);
    }

    assert(func->hasName()); // Checked by caller
    auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr,
        GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);

    assert(AMDGPU::isDynamicLDS(*N));
    return N;
  }

  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      std::vector<Constant *> newDynamicLDS;

      // Table is built in the same order as OrderedKernels
      for (auto &func : OrderedKernels) {

        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          assert(isKernelLDS(func));
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }

          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);

          KernelToCreatedDynamicLDS[func] = N;

          markUsedByKernel(Builder, func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      for (GlobalVariable *GV : DynamicVariables) {
        for (Use &U : make_early_inc_range(GV->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;

          replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }

  static bool canElideModuleLDS(const Function &F) {
    return F.hasFnAttribute("amdgpu-elide-module-lds");
  }

  static void markElideModuleLDS(Function &F) {
    F.addFnAttr("amdgpu-elide-module-lds");
  }

  bool runOnModule(Module &M) override {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    Changed = true; // todo: narrow this down

    // For each kernel, what variables does it access directly or through
    // callees
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

    // For each variable accessed through callees, which kernels access it
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second) {
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
      }
    }

    // Partition variables accessed indirectly into the different strategies
    DenseSet<GlobalVariable *> ModuleScopeVariables;
    DenseSet<GlobalVariable *> TableLookupVariables;
    DenseSet<GlobalVariable *> KernelAccessVariables;
    DenseSet<GlobalVariable *> DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // If the kernel accesses a variable that is going to be stored in the
    // module instance through a call then that kernel needs to allocate the
    // module instance
    const DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    const DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);

    const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
        lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
                                        KernelsThatAllocateModuleLDS,
                                        MaybeModuleScopeStruct);

    // Lower zero cost accesses to the kernel instances just created
    for (auto &GV : KernelAccessVariables) {
      auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
      assert(funcs.size() == 1); // Only one kernel can access it
      LDSVariableReplacement Replacement =
          KernelToReplacement[*(funcs.begin())];

      DenseSet<GlobalVariable *> Vec;
      Vec.insert(GV);

      // TODO: Looks like a latent bug, Replacement may not be marked
      // UsedByKernel here
      replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
        return isa<Instruction>(U.getUser());
      });
    }

    // The ith element of this vector is kernel id i
    std::vector<Function *> OrderedKernels =
        assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
                                      KernelsThatIndirectlyAllocateDynamicLDS);

    if (!KernelsThatAllocateTableLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        markUsedByKernel(Builder, OrderedKernels[i],
                         KernelToReplacement[OrderedKernels[i]].SGV);
      }

      // The order must be consistent between lookup table and accesses to
      // lookup table
      std::vector<GlobalVariable *> TableLookupVariablesOrdered(
          TableLookupVariables.begin(), TableLookupVariables.end());
      llvm::sort(TableLookupVariablesOrdered.begin(),
                 TableLookupVariablesOrdered.end(),
                 [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                   return lhs->getName() < rhs->getName();
                 });

      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);

    // All kernel frames have been allocated. Calculate and record the
    // addresses.

    {
      const DataLayout &DL = M.getDataLayout();

      for (Function &Func : M.functions()) {
        if (Func.isDeclaration() || !isKernelLDS(&Func))
          continue;

        // All three of these are optional. The first variable is allocated at
        // zero. They are allocated by allocateKnownAddressLDSGlobal in the
        // following order:
        //{
        //  module.lds
        //  alignment padding
        //  kernel instance
        //  alignment padding
        //  dynamic lds variables
        //}
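        //
        // For example (hypothetical sizes): a module.lds of size 4 occupies
        // [0, 4); a kernel struct with alignment 8 and size 20 then occupies
        // [8, 28); a dynamic variable with alignment 16 starts at offset 32.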
1221 
1222         const bool AllocateModuleScopeStruct =
1223             MaybeModuleScopeStruct && !canElideModuleLDS(Func);
1224 
1225         auto Replacement = KernelToReplacement.find(&Func);
1226         const bool AllocateKernelScopeStruct =
1227             Replacement != KernelToReplacement.end();
1228 
1229         const bool AllocateDynamicVariable =
1230             KernelToCreatedDynamicLDS.contains(&Func);
1231 
1232         uint32_t Offset = 0;
1233 
1234         if (AllocateModuleScopeStruct) {
1235           // Allocated at zero, recorded once on construction, not once per
1236           // kernel
1237           Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
1238         }
1239 
1240         if (AllocateKernelScopeStruct) {
1241           GlobalVariable *KernelStruct = Replacement->second.SGV;
1242 
1243           Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
1244 
1245           recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
1246 
1247           Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
1248 
1249         }
1250 
1251         if (AllocateDynamicVariable) {
1252           GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
1253 
1254           Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
1255 
1256           recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
1257         }
1258       }
1259     }
1260 
1261     for (auto &GV : make_early_inc_range(M.globals()))
1262       if (AMDGPU::isLDSVariableToLower(GV)) {
1263         // probably want to remove from used lists
1264         GV.removeDeadConstantUsers();
1265         if (GV.use_empty())
1266           GV.eraseFromParent();
1267       }
1268 
1269     return Changed;
1270   }
1271 
1272 private:
1273   // Increase the alignment of LDS globals if necessary to maximise the chance
1274   // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and a map from
    // each of those variables to the corresponding ConstantExpr GEP into it.
    // Padding variables may be introduced to meet alignment requirements. No
    // aliasing metadata is useful for these as they have no uses. They are
    // erased before return.
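    //
    // An illustrative sketch (names and layout hypothetical): given LDS
    // variables @a (i32) and @b (i64), this builds roughly
    //   %VarName.t = type { i64, i32 }  ; field order chosen by
    //                                   ; performOptimizedStructLayout
    //   @VarName = internal addrspace(3) global %VarName.t undef, align 8
    // and returns @VarName along with the map {@b -> gep field 0,
    // @a -> gep field 1}, where each gep is a ConstantExpr GEP into @VarName.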

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument, which varies when changing how they
      // are identified, leading to spurious test breakage.
      std::vector<GlobalVariable *> Sorted(LDSVarsToTransform.begin(),
                                           LDSVarsToTransform.end());
      llvm::sort(Sorted.begin(), Sorted.end(),
                 [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                   return lhs->getName() < rhs->getName();
                 });
      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet the requested alignment.
          // Note: (Offset + (Align - (Offset % Align))) % Align == 0,
          // i.e. (CurrentOffset + Padding) % DataAlignV == 0.
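          //
          // e.g. (hypothetical values) CurrentOffset == 6, DataAlignV == 8:
          // Rem == 6, Padding == 2 and the field is placed at offset 8.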

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }

  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      LDSVariableReplacement Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack: the aliasing info needs to be inserted in a predictable order
    // for lit tests. Ideally the variables would already be in a stable
    // order, preferably the one they are allocated in, which might mean
    // using an ordered set container here.
    std::vector<GlobalVariable *> LDSVarsToTransform(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end());
    llvm::sort(LDSVarsToTransform.begin(), LDSVarsToTransform.end(),
               [](const GlobalVariable *lhs, const GlobalVariable *rhs) {
                 return lhs->getName() < rhs->getName();
               });

    // Create the alias.scope metadata and its lists: each field in the new
    // struct does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the I'th variable with a ConstantExpr GEP to the
    // corresponding field of the instance that will be allocated by
    // AMDGPUMachineFunction.
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP[GV];

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

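      // NoAliasList holds every scope except AliasScopes[I]: restoring the
      // previous iteration's scope into slot I-1 leaves only the I'th scope
      // absent from the list.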
      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }

  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

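    // An illustrative (hypothetical) case: a load through a GEP at constant
    // offset 4 from a base known to be 16 byte aligned is given alignment
    // commonAlignment(16, 4) == 4, plus the alias.scope/noalias metadata.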
    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // No atomicrmw operation works on pointers, but check anyway in case
        // that changes, or in case we are processing a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}