//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates local data store, LDS, uses from non-kernel functions.
// LDS is contiguous memory allocated per kernel execution.
//
// Background.
//
// The programming model is global variables, or equivalently function local
// static variables, accessible from kernels or other functions. For uses from
// kernels this is straightforward - reserve the memory required by all the
// variables combined for the kernel and allocate them within that region.
// For uses from functions there are performance tradeoffs to choose between.
//
// This model means the GPU runtime can specify the amount of memory allocated.
// If this is more than the kernel assumed, the excess can be made available
// using a language specific feature, which IR represents as a variable with
// no initializer. This feature is referred to here as "Dynamic LDS" and is
// lowered slightly differently to the normal case.
//
// Consequences of this GPU feature:
// - memory is limited and exceeding it halts compilation
// - a global accessed by one kernel exists independent of other kernels
// - a global exists independent of simultaneous execution of the same kernel
// - the address of a global may differ between kernels as the per-kernel
//   instances do not alias, which permits allocating only the variables each
//   kernel uses
// - if the address is allowed to differ, functions need help to find it
//
// Uses from kernels are implemented here by grouping them in a per-kernel
// struct instance. This duplicates the variables, accurately modelling their
// aliasing properties relative to a single global representation. It also
// permits control over alignment via padding.
//
// Uses from functions are more complicated and are the primary purpose of this
// IR pass. Several different lowerings are chosen between to meet the
// requirements: avoid allocating any LDS where it is not necessary, as that
// impacts occupancy and may cause compilation to fail, while not imposing
// overhead on a feature whose primary advantage over global memory is
// performance. The basic design goal is to avoid one kernel imposing overhead
// on another.
//
// Implementation.
//
// LDS variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
// Non-undef initializers are not yet implemented for LDS.
//
// LDS variables that are always allocated at the same address can be found
// by lookup at that address. Otherwise runtime information/cost is required.
//
// The simplest strategy possible is to group all LDS variables in a single
// struct and allocate that struct in every kernel such that the original
// variables are always at the same address. LDS is however a limited resource
// so this strategy is unusable in practice. It is not implemented here.
//
// Strategy | Precise allocation | Zero runtime cost | General purpose |
//  --------+--------------------+-------------------+-----------------+
//   Module |                 No |               Yes |             Yes |
//    Table |                Yes |                No |             Yes |
//   Kernel |                Yes |               Yes |              No |
//   Hybrid |                Yes |           Partial |             Yes |
//
// "Module" spends LDS memory to save cycles. "Table" spends cycles and global
// memory to save LDS. "Kernel" is as fast as kernel allocation but only works
// for variables that are known reachable from a single kernel. "Hybrid" picks
// between all three. When forced to choose between LDS and cycles we minimise
// LDS use.
//
// The "module" lowering implemented here finds LDS variables which are used by
// non-kernel functions and creates a new struct with a field for each of those
// LDS variables. Variables that are only used from kernels are excluded.
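//
// As an illustrative sketch, variables @a and @b used from functions are
// replaced by a single struct instance along the lines of:
//
//   %llvm.amdgcn.module.lds.t = type { float, i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t poison, align 4
//
// with uses rewritten to constant GEPs into that instance (types and
// alignment illustrative).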
//
// The "table" lowering implemented here has three components.
// First, kernels are assigned a unique integer identifier which is available
// in functions they call through the intrinsic amdgcn_lds_kernel_id. The
// integer is passed through a specific SGPR, thus works with indirect calls.
// Second, each kernel allocates LDS variables independent of other kernels and
// writes the addresses it chose for each variable into an array in consistent
// order. If the kernel does not allocate a given variable, it writes undef to
// the corresponding array location. These arrays are written to a constant
// table in the order matching the kernel unique integer identifier.
// Third, uses from non-kernel functions are replaced with a table lookup using
// the intrinsic function to find the address of the variable.
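//
// As a rough sketch, a use of @var from a non-kernel function becomes:
//
//   %id = call i32 @llvm.amdgcn.lds.kernel.id()
//   %entry = getelementptr inbounds [N x [M x i32]],
//       ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 %id, i32 V
//   %offset = load i32, ptr addrspace(4) %entry
//   %var = inttoptr i32 %offset to ptr addrspace(3)
//
// where N is the number of kernels, M the number of table lowered variables
// and V the index assigned to @var (names illustrative).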
//
// "Kernel" lowering is only applicable for variables that are unambiguously
// reachable from exactly one kernel. For those cases, accesses to the variable
// can be lowered to ConstantExpr address of a struct instance specific to that
// one kernel. This is zero cost in space and in compute. It will raise a fatal
// error on any variable that might be reachable from multiple kernels and is
// thus most easily used as part of the hybrid lowering strategy.
//
// Hybrid lowering is a mixture of the above. It uses the zero cost kernel
// lowering where it can. It lowers the variable accessed by the greatest
// number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
// without incurring LDS memory overhead are lowered that way too. The
// remaining ones are lowered via table.
//
// Consequences
// - No heuristics or user controlled magic numbers, hybrid is the right choice
// - Kernels that don't use functions (or have had them all inlined) are not
//   affected by any lowering for kernels that do.
// - Kernels that don't make indirect function calls are not affected by those
//   that do.
// - Variables which are used by lots of kernels, e.g. those injected by a
//   language runtime in most kernels, are expected to have no overhead
// - Implementations that instantiate templates per-kernel where those templates
//   use LDS are expected to hit the "Kernel" lowering strategy
// - The runtime properties impose a cost in compiler implementation complexity
//
// Dynamic LDS implementation
// Dynamic LDS is lowered similarly to the "table" strategy above and uses the
// same intrinsic to identify which kernel is at the root of the dynamic call
// graph. This relies on the specified behaviour that all dynamic LDS variables
// alias one another, i.e. are at the same address, with respect to a given
// kernel. Therefore this pass creates new dynamic LDS variables for each kernel
// that allocates any dynamic LDS and builds a table of addresses out of those.
// The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
// The corresponding optimisation for "kernel" lowering where the table lookup
// is elided is not implemented.
//
//
// Implementation notes / limitations
// A single LDS global variable represents an instance per kernel that can
// reach it. This pass essentially specialises those variables per kernel.
// Handling ConstantExpr during the pass complicated this significantly so now
// all ConstantExpr uses of LDS variables are expanded to instructions. This
// may need amending when implementing non-undef initialisers.
//
// Lowering is split between this IR pass and the back end. This pass chooses
// where given variables should be allocated and marks them with metadata,
// MD_absolute_symbol. The backend independently places the variables at the
// same addresses and raises a fatal error if something has gone awry. This
// works in practice because the only pass between this one and the backend
// that changes LDS is PromoteAlloca and the changes it makes do not conflict.
//
// Addresses are written to constant global arrays based on the same metadata.
//
// The backend lowers LDS variables in the order of traversal of the function.
// This is at odds with the deterministic layout required. The workaround is to
// allocate the fixed-address variables immediately upon starting the function
// where they can be placed as intended. This requires a means of mapping from
// the function to the variables that it allocates. For the module scope lds,
// this is via metadata indicating whether the variable is not required. If a
// pass deletes that metadata, a fatal error on disagreement with the absolute
// symbol metadata will occur. For kernel scope and dynamic, this is by _name_
// correspondence between the function and the variable. It requires the
// kernel to have a name (which is only a limitation for tests in practice) and
// for nothing to rename the corresponding symbols. This is a hazard if the pass
// is run multiple times during debugging. Alternative schemes considered all
// involve bespoke metadata.
//
// If the name correspondence can be replaced, multiple distinct kernels that
// have the same memory layout can map to the same kernel id (as the address
// itself is handled by the absolute symbol metadata) and that will allow more
// uses of the "kernel" style faster lowering and reduce the size of the lookup
// tables.
//
// There is a test that checks this does not fire for a graphics shader. This
// lowering is expected to work for graphics if the isKernel test is changed.
//
// The current markUsedByKernel is sufficient for PromoteAlloca but is elided
// before codegen. Replacing this with an equivalent intrinsic which lasts until
// shortly after the machine function lowering of LDS would help break the name
// mapping. The other part needed is probably to amend PromoteAlloca to embed
// the LDS variables it creates in the same struct created here. That avoids the
// current hazard where a PromoteAlloca LDS variable might be allocated before
// the kernel scope (and thus error on the address check). Given a new invariant
// that no LDS variables exist outside of the structs managed here, and an
// intrinsic that lasts until after the LDS frame lowering, it should be
// possible to drop the name mapping and fold equivalent memory layouts.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

#include <cstdio>
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

namespace {

cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

enum class LoweringKind { module, table, kernel, hybrid };
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));

bool isKernelLDS(const Function *F) {
  // Some weirdness here. AMDGPU::isKernelCC does not call into
  // AMDGPU::isKernel with the calling conv, it instead calls into
  // isModuleEntryFunction which returns true for more calling conventions
  // than AMDGPU::isKernel does. There's a FIXME on AMDGPU::isKernel.
  // There's also a test that checks that the LDS lowering does not hit on
  // a graphics shader, denoted amdgpu_ps, so stay with the limited case.
  // Putting LDS in the name of the function to draw attention to this.
  return AMDGPU::isKernel(F->getCallingConv());
}

template <typename T> std::vector<T> sortByName(std::vector<T> &&V) {
  llvm::sort(V, [](const auto *L, const auto *R) {
    return L->getName() < R->getName();
  });
  return std::move(V);
}

class AMDGPULowerModuleLDS {
  const AMDGPUTargetMachine &TM;

  static void
  removeLocalVarsFromUsedLists(Module &M,
                               const DenseSet<GlobalVariable *> &LocalVars) {
    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    SmallPtrSet<Constant *, 8> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));

    removeFromUsedLists(
        M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });

    for (GlobalVariable *LocalVar : LocalVars)
      LocalVar->removeDeadConstantUsers();
  }

  static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    // This intrinsic is eliminated shortly before instruction selection. It
    // does not suffice to indicate to ISel that a given global which is not
    // immediately used by the kernel must still be allocated by it. An
    // equivalent target specific intrinsic which lasts until immediately after
    // codegen would suffice for that, but one would still need to ensure that
    // the variables are allocated in the anticipated order.
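    //
    // A sketch of the call emitted below (struct name illustrative):
    //   call void @llvm.donothing() [
    //       "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]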
    BasicBlock *Entry = &Func->getEntryBlock();
    IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {
        Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};

    Builder.CreateCall(
        Decl, {}, {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
  }

  static bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M) {
    // Constants are uniqued within LLVM. A ConstantExpr referring to a LDS
    // global may have uses from multiple different functions as a result.
    // This pass specialises LDS variables with respect to the kernel that
    // allocates them.

    // This is semantically equivalent to (left unimplemented as it would be
    // slow):
    // for (auto &F : M.functions())
    //   for (auto &BB : F)
    //     for (auto &I : BB)
    //       for (Use &Op : I.operands())
    //         if (constantExprUsesLDS(Op))
    //           replaceConstantExprInFunction(I, Op);
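    //
    // e.g. a single constantexpr gep of an LDS global used from two functions
    // becomes a distinct getelementptr instruction in each function, so each
    // use can later be rewritten against that function's kernel.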

    SmallVector<Constant *> LDSGlobals;
    for (auto &GV : M.globals())
      if (AMDGPU::isLDSVariableToLower(GV))
        LDSGlobals.push_back(&GV);

    return convertUsersOfConstantsToInstructions(LDSGlobals);
  }

public:
  AMDGPULowerModuleLDS(const AMDGPUTargetMachine &TM_) : TM(TM_) {}

  using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;

  using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;

  static void getUsesOfLDSByFunction(CallGraph const &CG, Module &M,
                                     FunctionVariableMap &kernels,
                                     FunctionVariableMap &functions) {

    // Get uses from the current function, excluding uses by called functions
    // Two output variables to avoid walking the globals list twice
    std::optional<bool> HasAbsoluteGVs;
    for (auto &GV : M.globals()) {
      if (!AMDGPU::isLDSVariableToLower(GV)) {
        continue;
      }

      // Check if the module is consistent: either all GVs are absolute (happens
      // when we run the pass more than once), or none are.
      const bool IsAbsolute = GV.isAbsoluteSymbolRef();
      if (HasAbsoluteGVs.has_value()) {
        if (*HasAbsoluteGVs != IsAbsolute) {
          report_fatal_error(
              "Module cannot mix absolute and non-absolute LDS GVs");
        }
      } else
        HasAbsoluteGVs = IsAbsolute;

      if (IsAbsolute)
        continue;

      for (User *V : GV.users()) {
        if (auto *I = dyn_cast<Instruction>(V)) {
          Function *F = I->getFunction();
          if (isKernelLDS(F)) {
            kernels[F].insert(&GV);
          } else {
            functions[F].insert(&GV);
          }
        }
      }
    }
  }

  struct LDSUsesInfoTy {
    FunctionVariableMap direct_access;
    FunctionVariableMap indirect_access;
  };

  static LDSUsesInfoTy getTransitiveUsesOfLDS(CallGraph const &CG, Module &M) {

    FunctionVariableMap direct_map_kernel;
    FunctionVariableMap direct_map_function;
    getUsesOfLDSByFunction(CG, M, direct_map_kernel, direct_map_function);

    // Collect variables that are used by functions whose address has escaped
    DenseSet<GlobalVariable *> VariablesReachableThroughFunctionPointer;
    for (Function &F : M.functions()) {
      if (!isKernelLDS(&F))
        if (F.hasAddressTaken(nullptr,
                              /* IgnoreCallbackUses */ false,
                              /* IgnoreAssumeLikeCalls */ false,
                              /* IgnoreLLVMUsed */ true,
                              /* IgnoreArcAttachedCall */ false)) {
          set_union(VariablesReachableThroughFunctionPointer,
                    direct_map_function[&F]);
        }
    }

    auto functionMakesUnknownCall = [&](const Function *F) -> bool {
      assert(!F->isDeclaration());
      for (const CallGraphNode::CallRecord &R : *CG[F]) {
        if (!R.second->getFunction()) {
          return true;
        }
      }
      return false;
    };

    // Work out which variables are reachable through function calls
    FunctionVariableMap transitive_map_function = direct_map_function;

    // If the function makes any unknown call, assume the worst case that it can
    // access all variables accessed by functions whose address escaped
    for (Function &F : M.functions()) {
      if (!F.isDeclaration() && functionMakesUnknownCall(&F)) {
        if (!isKernelLDS(&F)) {
          set_union(transitive_map_function[&F],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    // Direct implementation of collecting all variables reachable from each
    // function
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || isKernelLDS(&Func))
        continue;

      DenseSet<Function *> seen; // catches cycles
      SmallVector<Function *, 4> wip{&Func};

      while (!wip.empty()) {
        Function *F = wip.pop_back_val();

        // Can accelerate this by referring to transitive map for functions that
        // have already been computed, with more care than this
        set_union(transitive_map_function[&Func], direct_map_function[F]);

        for (const CallGraphNode::CallRecord &R : *CG[F]) {
          Function *ith = R.second->getFunction();
          if (ith) {
            if (!seen.contains(ith)) {
              seen.insert(ith);
              wip.push_back(ith);
            }
          }
        }
      }
    }

    // direct_map_kernel lists which variables are used by the kernel
    // find the variables which are used through a function call
    FunctionVariableMap indirect_map_kernel;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      for (const CallGraphNode::CallRecord &R : *CG[&Func]) {
        Function *ith = R.second->getFunction();
        if (ith) {
          set_union(indirect_map_kernel[&Func], transitive_map_function[ith]);
        } else {
          set_union(indirect_map_kernel[&Func],
                    VariablesReachableThroughFunctionPointer);
        }
      }
    }

    return {std::move(direct_map_kernel), std::move(indirect_map_kernel)};
  }

  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };

  // Remap from an LDS global to the ConstantExpr GEP into the struct it has
  // been moved to for each kernel. The lookup table built later is an array
  // with an element for each kernel containing where the corresponding
  // variable was remapped to.

  static Constant *getAddressesOfVariablesInKernel(
      LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
      const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
    // Create a ConstantArray containing the address of each Variable within the
    // kernel corresponding to LDSVarsToConstantGEP, or poison if that kernel
    // does not allocate it
    // TODO: Drop the ptrtoint conversion
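    //
    // e.g. for two variables where this kernel allocates only the first, the
    // initializer is roughly:
    //   [i32 ptrtoint (GEP into the kernel struct), i32 poison]
    // (a sketch; the real element is a folded ptrtoint of a ConstantExpr GEP).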

    Type *I32 = Type::getInt32Ty(Ctx);

    ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());

    SmallVector<Constant *> Elements;
    for (size_t i = 0; i < Variables.size(); i++) {
      GlobalVariable *GV = Variables[i];
      auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
      if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
        auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
        Elements.push_back(elt);
      } else {
        Elements.push_back(PoisonValue::get(I32));
      }
    }
    return ConstantArray::get(KernelOffsetsType, Elements);
  }

  static GlobalVariable *buildLookupTable(
      Module &M, ArrayRef<GlobalVariable *> Variables,
      ArrayRef<Function *> kernels,
      DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
    if (Variables.empty()) {
      return nullptr;
    }
    LLVMContext &Ctx = M.getContext();

    const size_t NumberVariables = Variables.size();
    const size_t NumberKernels = kernels.size();

    ArrayType *KernelOffsetsType =
        ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);

    ArrayType *AllKernelsOffsetsType =
        ArrayType::get(KernelOffsetsType, NumberKernels);

    Constant *Missing = PoisonValue::get(KernelOffsetsType);
    std::vector<Constant *> overallConstantExprElts(NumberKernels);
    for (size_t i = 0; i < NumberKernels; i++) {
      auto Replacement = KernelToReplacement.find(kernels[i]);
      overallConstantExprElts[i] =
          (Replacement == KernelToReplacement.end())
              ? Missing
              : getAddressesOfVariablesInKernel(
                    Ctx, Variables, Replacement->second.LDSVarsToConstantGEP);
    }

    Constant *init =
        ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);

    return new GlobalVariable(
        M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
        "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
        AMDGPUAS::CONSTANT_ADDRESS);
  }

  void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
                                 GlobalVariable *LookupTable,
                                 GlobalVariable *GV, Use &U,
                                 Value *OptionalIndex) {
    // Table is a constant array of the same length as OrderedKernels
    LLVMContext &Ctx = M.getContext();
    Type *I32 = Type::getInt32Ty(Ctx);
    auto *I = cast<Instruction>(U.getUser());

    Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());

    if (auto *Phi = dyn_cast<PHINode>(I)) {
      BasicBlock *BB = Phi->getIncomingBlock(U);
      Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
    } else {
      Builder.SetInsertPoint(I);
    }

    SmallVector<Value *, 3> GEPIdx = {
        ConstantInt::get(I32, 0),
        tableKernelIndex,
    };
    if (OptionalIndex)
      GEPIdx.push_back(OptionalIndex);

    Value *Address = Builder.CreateInBoundsGEP(
        LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());

    Value *loaded = Builder.CreateLoad(I32, Address);

    Value *replacement =
        Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());

    U.set(replacement);
  }

  void replaceUsesInInstructionsWithTableLookup(
      Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
      GlobalVariable *LookupTable) {

    LLVMContext &Ctx = M.getContext();
    IRBuilder<> Builder(Ctx);
    Type *I32 = Type::getInt32Ty(Ctx);

    for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
      auto *GV = ModuleScopeVariables[Index];

      for (Use &U : make_early_inc_range(GV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I)
          continue;

        replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
                                  ConstantInt::get(I32, Index));
      }
    }
  }

  static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &VariableSet) {

    DenseSet<Function *> KernelSet;

    if (VariableSet.empty())
      return KernelSet;

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;
      for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
        if (VariableSet.contains(GV)) {
          KernelSet.insert(&Func);
          break;
        }
      }
    }

    return KernelSet;
  }

  static GlobalVariable *
  chooseBestVariableForModuleStrategy(const DataLayout &DL,
                                      VariableFunctionMap &LDSVars) {
    // Find the global variable with the most indirect uses from kernels

    struct CandidateTy {
      GlobalVariable *GV = nullptr;
      size_t UserCount = 0;
      size_t Size = 0;

      CandidateTy() = default;

      CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
          : GV(GV), UserCount(UserCount), Size(AllocSize) {}

      bool operator<(const CandidateTy &Other) const {
        // Fewer users makes module scope variable less attractive
        if (UserCount < Other.UserCount) {
          return true;
        }
        if (UserCount > Other.UserCount) {
          return false;
        }

        // Bigger makes module scope variable less attractive
        if (Size < Other.Size) {
          return false;
        }

        if (Size > Other.Size) {
          return true;
        }

        // Arbitrary but consistent
        return GV->getName() < Other.GV->getName();
      }
    };

    CandidateTy MostUsed;

    for (auto &K : LDSVars) {
      GlobalVariable *GV = K.first;
      if (K.second.size() <= 1) {
        // A variable reachable by only one kernel is best lowered with kernel
        // strategy
        continue;
      }
      CandidateTy Candidate(
          GV, K.second.size(),
          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
      if (MostUsed < Candidate)
        MostUsed = Candidate;
    }

    return MostUsed.GV;
  }

  static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
                                       uint32_t Address) {
    // Write the specified address into metadata where it can be retrieved by
    // the assembler. Format is a half open range, [Address, Address+1)
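    // e.g. recording address 8 attaches !absolute_symbol !{i32 8, i32 9} to
    // the variable (the integer type matches the LDS pointer width).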
    LLVMContext &Ctx = M->getContext();
    auto *IntTy =
        M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
    auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
    auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
    GV->setMetadata(LLVMContext::MD_absolute_symbol,
                    MDNode::get(Ctx, {MinC, MaxC}));
  }

  DenseMap<Function *, Value *> tableKernelIndexCache;
  Value *getTableLookupKernelIndex(Module &M, Function *F) {
    // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
    // lowers to a read from a live in register. Emit it once in the entry
    // block to spare deduplicating it later.
    auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
    if (Inserted) {
      Function *Decl =
          Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});

      auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
      IRBuilder<> Builder(&*InsertAt);

      It->second = Builder.CreateCall(Decl, {});
    }

    return It->second;
  }

  static std::vector<Function *> assignLDSKernelIDToEachKernel(
      Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
    // Associate kernels in the set with an arbitrary but reproducible order and
    // annotate them with that order in metadata. This metadata is recognised by
    // the backend and lowered to a SGPR which can be read from using
    // amdgcn_lds_kernel_id.
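    //
    // e.g. the kernel assigned id 2 is annotated with function metadata
    // !llvm.amdgcn.lds.kernel.id containing !{i32 2} (see setMetadata below).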

    std::vector<Function *> OrderedKernels;
    if (!KernelsThatAllocateTableLDS.empty() ||
        !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {

      for (Function &Func : M->functions()) {
        if (Func.isDeclaration())
          continue;
        if (!isKernelLDS(&Func))
          continue;

        if (KernelsThatAllocateTableLDS.contains(&Func) ||
            KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
          assert(Func.hasName()); // else fatal error earlier
          OrderedKernels.push_back(&Func);
        }
      }

      // Put them in an arbitrary but reproducible order
      OrderedKernels = sortByName(std::move(OrderedKernels));

      // Annotate the kernels with their order in this vector
      LLVMContext &Ctx = M->getContext();
      IRBuilder<> Builder(Ctx);

      if (OrderedKernels.size() > UINT32_MAX) {
        // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
        report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
      }

      for (size_t i = 0; i < OrderedKernels.size(); i++) {
        Metadata *AttrMDArgs[1] = {
            ConstantAsMetadata::get(Builder.getInt32(i)),
        };
        OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
                                       MDNode::get(Ctx, AttrMDArgs));
      }
    }
    return OrderedKernels;
  }

  static void partitionVariablesIntoIndirectStrategies(
      Module &M, LDSUsesInfoTy const &LDSUsesInfo,
      VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
      DenseSet<GlobalVariable *> &ModuleScopeVariables,
      DenseSet<GlobalVariable *> &TableLookupVariables,
      DenseSet<GlobalVariable *> &KernelAccessVariables,
      DenseSet<GlobalVariable *> &DynamicVariables) {

    GlobalVariable *HybridModuleRoot =
        LoweringKindLoc != LoweringKind::hybrid
            ? nullptr
            : chooseBestVariableForModuleStrategy(
                  M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

    DenseSet<Function *> const EmptySet;
    DenseSet<Function *> const &HybridModuleRootKernels =
        HybridModuleRoot
            ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
            : EmptySet;

    for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
      // Each iteration of this loop assigns exactly one global variable to
      // exactly one of the implementation strategies.

      GlobalVariable *GV = K.first;
      assert(AMDGPU::isLDSVariableToLower(*GV));
      assert(K.second.size() != 0);

      if (AMDGPU::isDynamicLDS(*GV)) {
        DynamicVariables.insert(GV);
        continue;
      }

      switch (LoweringKindLoc) {
      case LoweringKind::module:
        ModuleScopeVariables.insert(GV);
        break;

      case LoweringKind::table:
        TableLookupVariables.insert(GV);
        break;

      case LoweringKind::kernel:
        if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else {
          report_fatal_error(
              "cannot lower LDS '" + GV->getName() +
              "' to kernel access as it is reachable from multiple kernels");
        }
        break;

      case LoweringKind::hybrid: {
        if (GV == HybridModuleRoot) {
          assert(K.second.size() != 1);
          ModuleScopeVariables.insert(GV);
        } else if (K.second.size() == 1) {
          KernelAccessVariables.insert(GV);
        } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
          ModuleScopeVariables.insert(GV);
        } else {
          TableLookupVariables.insert(GV);
        }
        break;
      }
      }
    }

    // All LDS variables accessed indirectly have now been partitioned into
    // the distinct lowering strategies.
    assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
               KernelAccessVariables.size() + DynamicVariables.size() ==
           LDSToKernelsThatNeedToAccessItIndirectly.size());
  }

  static GlobalVariable *lowerModuleScopeStructVariables(
      Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
    // Create a struct to hold the ModuleScopeVariables.
    // Replace all uses of those variables from non-kernel functions with the
    // new struct instance. Replace only the uses from kernel functions that
    // will allocate this instance. That is a space optimisation - kernels that
    // use a subset of the module scope struct and do not need to allocate it
    // for indirect calls will only allocate the subset they use (they do so as
    // part of the per-kernel lowering).
    if (ModuleScopeVariables.empty()) {
      return nullptr;
    }

    LLVMContext &Ctx = M.getContext();

    LDSVariableReplacement ModuleScopeReplacement =
        createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                     ModuleScopeVariables);

    appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                                ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                                    cast<Constant>(ModuleScopeReplacement.SGV),
                                    PointerType::getUnqual(Ctx)))});

    // module.lds will be allocated at zero in any kernel that allocates it
    recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

    // historic
    removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

    // Replace all uses of module scope variable from non-kernel functions
    replaceLDSVariablesWithStruct(
        M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          if (!I) {
            return false;
          }
          Function *F = I->getFunction();
          return !isKernelLDS(F);
        });

    // Replace uses of module scope variable from kernel functions that
    // allocate the module scope variable, otherwise leave them unchanged
    // Record on each kernel whether the module scope global is used by it

    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        replaceLDSVariablesWithStruct(
            M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
              Instruction *I = dyn_cast<Instruction>(U.getUser());
              if (!I) {
                return false;
              }
              Function *F = I->getFunction();
              return F == &Func;
            });

        markUsedByKernel(&Func, ModuleScopeReplacement.SGV);
      }
    }

    return ModuleScopeReplacement.SGV;
  }

  static DenseMap<Function *, LDSVariableReplacement>
  lowerKernelScopeStructVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<GlobalVariable *> const &ModuleScopeVariables,
      DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
      GlobalVariable *MaybeModuleScopeStruct) {

    // Create a struct for each kernel for the non-module-scope variables.

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
        continue;

      DenseSet<GlobalVariable *> KernelUsedVariables;
      // Allocating variables that are used directly in this struct to get
      // alignment aware allocation and predictable frame size.
      for (auto &v : LDSUsesInfo.direct_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Allocating variables that are accessed indirectly so that a lookup of
      // this struct instance can find them from nested functions.
      for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
        if (!AMDGPU::isDynamicLDS(*v)) {
          KernelUsedVariables.insert(v);
        }
      }

      // Variables allocated in module lds must all resolve to that struct,
      // not to the per-kernel instance.
      if (KernelsThatAllocateModuleLDS.contains(&Func)) {
        for (GlobalVariable *v : ModuleScopeVariables) {
          KernelUsedVariables.erase(v);
        }
      }

      if (KernelUsedVariables.empty()) {
        // Either used no LDS, or the LDS it used was all in the module struct
        // or dynamically sized
        continue;
      }

      // The association between kernel function and LDS struct is done by
      // symbol name, which only works if the function in question has a name.
      // This is not expected to be a problem in practice as kernels are
      // called by name, making anonymous ones (which are named by the
      // backend) difficult to use. This does mean that llvm test cases need
      // to name the kernels.
      if (!Func.hasName()) {
        report_fatal_error("Anonymous kernels cannot use LDS variables");
      }

      std::string VarName =
          (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);

      // If any indirect uses, create a direct use to ensure allocation
      // TODO: Simpler to unconditionally mark used but that regresses
      // codegen in test/CodeGen/AMDGPU/noclobber-barrier.ll
      auto Accesses = LDSUsesInfo.indirect_access.find(&Func);
      if ((Accesses != LDSUsesInfo.indirect_access.end()) &&
          !Accesses->second.empty())
        markUsedByKernel(&Func, Replacement.SGV);

      // remove preserves existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;

      // Rewrite uses within kernel to the new struct
      replaceLDSVariablesWithStruct(
          M, KernelUsedVariables, Replacement, [&Func](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            return I && I->getFunction() == &Func;
          });
    }
    return KernelToReplacement;
  }

  static GlobalVariable *
  buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
                                        Function *func) {
    // Create a dynamic lds variable with a name associated with the passed
    // function and with the maximum alignment of any dynamic lds variable
    // reachable from this kernel. Dynamic LDS is allocated after the static LDS
    // allocation, possibly after alignment padding. All dynamic LDS variables
    // are allocated at the same address in each kernel in order to provide the
    // documented aliasing semantics. Setting the alignment here allows this IR
    // pass to accurately predict the exact constant at which the variable will
    // be allocated.
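    //
    // Sketch of the variable created for a kernel @k (alignment illustrative):
    //   @llvm.amdgcn.k.dynlds = external addrspace(3) global [0 x i8], align 8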

    assert(isKernelLDS(func));

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    Align MaxDynamicAlignment(1);

    auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
      if (AMDGPU::isDynamicLDS(*GV)) {
        MaxDynamicAlignment =
            std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
      }
    };

    for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
      UpdateMaxAlignment(GV);
    }

    for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
      UpdateMaxAlignment(GV);
    }

    assert(func->hasName()); // Checked by caller
    auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
    GlobalVariable *N = new GlobalVariable(
        M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
        Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr,
        GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
    N->setAlignment(MaxDynamicAlignment);

    assert(AMDGPU::isDynamicLDS(*N));
    return N;
  }

  /// Strip "amdgpu-no-lds-kernel-id" from any functions where we may have
  /// introduced its use. If AMDGPUAttributor ran prior to the pass, we inferred
  /// the lack of llvm.amdgcn.lds.kernel.id calls.
  void removeNoLdsKernelIdFromReachable(CallGraph &CG, Function *KernelRoot) {
    KernelRoot->removeFnAttr("amdgpu-no-lds-kernel-id");

    SmallVector<Function *> Tmp({CG[KernelRoot]->getFunction()});
    if (!Tmp.back())
      return;

    SmallPtrSet<Function *, 8> Visited;
    bool SeenUnknownCall = false;

    do {
      Function *F = Tmp.pop_back_val();

      for (auto &N : *CG[F]) {
        if (!N.second)
          continue;

        Function *Callee = N.second->getFunction();
        if (!Callee) {
          if (!SeenUnknownCall) {
            SeenUnknownCall = true;

            // If we see any indirect calls, assume nothing about potential
            // targets.
            // TODO: This could be refined to possible LDS global users.
            for (auto &N : *CG.getExternalCallingNode()) {
              Function *PotentialCallee = N.second->getFunction();
              if (!isKernelLDS(PotentialCallee))
                PotentialCallee->removeFnAttr("amdgpu-no-lds-kernel-id");
            }
          }
          // There is no named callee to strip the attribute from; skip to the
          // next call record rather than dereferencing the null Callee below.
          continue;
        }

        Callee->removeFnAttr("amdgpu-no-lds-kernel-id");
        if (Visited.insert(Callee).second)
          Tmp.push_back(Callee);
      }
    } while (!Tmp.empty());
  }

  DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
      Module &M, LDSUsesInfoTy &LDSUsesInfo,
      DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
      DenseSet<GlobalVariable *> const &DynamicVariables,
      std::vector<Function *> const &OrderedKernels) {
    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
    if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);
      Type *I32 = Type::getInt32Ty(Ctx);

      std::vector<Constant *> newDynamicLDS;

      // Table is built in the same order as OrderedKernels
      for (auto &func : OrderedKernels) {

        if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
          assert(isKernelLDS(func));
          if (!func->hasName()) {
            report_fatal_error("Anonymous kernels cannot use LDS variables");
          }

          GlobalVariable *N =
              buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);

          KernelToCreatedDynamicLDS[func] = N;

          markUsedByKernel(func, N);

          auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
          auto GEP = ConstantExpr::getGetElementPtr(
              emptyCharArray, N, ConstantInt::get(I32, 0), true);
          newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
        } else {
          newDynamicLDS.push_back(PoisonValue::get(I32));
        }
      }
      assert(OrderedKernels.size() == newDynamicLDS.size());

      ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
      Constant *init = ConstantArray::get(t, newDynamicLDS);
      GlobalVariable *table = new GlobalVariable(
          M, t, true, GlobalValue::InternalLinkage, init,
          "llvm.amdgcn.dynlds.offset.table", nullptr,
          GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);

      for (GlobalVariable *GV : DynamicVariables) {
        for (Use &U : make_early_inc_range(GV->uses())) {
          auto *I = dyn_cast<Instruction>(U.getUser());
          if (!I)
            continue;
          if (isKernelLDS(I->getFunction()))
            continue;

          replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
        }
      }
    }
    return KernelToCreatedDynamicLDS;
  }

  bool runOnModule(Module &M) {
    CallGraph CG = CallGraph(M);
    bool Changed = superAlignLDSGlobals(M);

    Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);

    Changed = true; // todo: narrow this down

    // For each kernel, what variables does it access directly or through
    // callees
    LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

    // For each variable accessed through callees, which kernels access it
    VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
    for (auto &K : LDSUsesInfo.indirect_access) {
      Function *F = K.first;
      assert(isKernelLDS(F));
      for (GlobalVariable *GV : K.second) {
        LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
      }
    }

    // Partition variables accessed indirectly into the different strategies
    DenseSet<GlobalVariable *> ModuleScopeVariables;
    DenseSet<GlobalVariable *> TableLookupVariables;
    DenseSet<GlobalVariable *> KernelAccessVariables;
    DenseSet<GlobalVariable *> DynamicVariables;
    partitionVariablesIntoIndirectStrategies(
        M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
        ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
        DynamicVariables);

    // If the kernel accesses a variable that is going to be stored in the
    // module instance through a call then that kernel needs to allocate the
    // module instance
    const DenseSet<Function *> KernelsThatAllocateModuleLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        ModuleScopeVariables);
    const DenseSet<Function *> KernelsThatAllocateTableLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        TableLookupVariables);

    const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
        kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
                                                        DynamicVariables);

    GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
        M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);

    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
        lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
                                        KernelsThatAllocateModuleLDS,
                                        MaybeModuleScopeStruct);

    // Lower zero cost accesses to the kernel instances just created
    for (auto &GV : KernelAccessVariables) {
      auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
      assert(funcs.size() == 1); // Only one kernel can access it
      LDSVariableReplacement Replacement =
          KernelToReplacement[*(funcs.begin())];

      DenseSet<GlobalVariable *> Vec;
      Vec.insert(GV);

      replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
        return isa<Instruction>(U.getUser());
      });
    }

    // The ith element of this vector is kernel id i
    std::vector<Function *> OrderedKernels =
        assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
                                      KernelsThatIndirectlyAllocateDynamicLDS);

    if (!KernelsThatAllocateTableLDS.empty()) {
      LLVMContext &Ctx = M.getContext();
      IRBuilder<> Builder(Ctx);

      // The order must be consistent between lookup table and accesses to
      // lookup table
      auto TableLookupVariablesOrdered =
          sortByName(std::vector<GlobalVariable *>(TableLookupVariables.begin(),
                                                   TableLookupVariables.end()));

      GlobalVariable *LookupTable = buildLookupTable(
          M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
      replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
                                               LookupTable);

      // Strip amdgpu-no-lds-kernel-id from all functions reachable from the
      // kernel. We may have inferred this wasn't used prior to the pass.
      //
      // TODO: We could filter out subgraphs that do not access LDS globals.
      for (Function *F : KernelsThatAllocateTableLDS)
        removeNoLdsKernelIdFromReachable(CG, F);
    }

    DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
        lowerDynamicLDSVariables(M, LDSUsesInfo,
                                 KernelsThatIndirectlyAllocateDynamicLDS,
                                 DynamicVariables, OrderedKernels);
1246 
1247     // All kernel frames have been allocated. Calculate and record the
1248     // addresses.
1249     {
1250       const DataLayout &DL = M.getDataLayout();
1251 
1252       for (Function &Func : M.functions()) {
1253         if (Func.isDeclaration() || !isKernelLDS(&Func))
1254           continue;
1255 
1256         // All three of these are optional. The first variable is allocated at
1257         // zero. They are allocated by AMDGPUMachineFunction as one block.
1258         // Layout:
1259         //{
1260         //  module.lds
1261         //  alignment padding
1262         //  kernel instance
1263         //  alignment padding
1264         //  dynamic lds variables
1265         //}

        const bool AllocateModuleScopeStruct =
            MaybeModuleScopeStruct &&
            KernelsThatAllocateModuleLDS.contains(&Func);

        auto Replacement = KernelToReplacement.find(&Func);
        const bool AllocateKernelScopeStruct =
            Replacement != KernelToReplacement.end();

        const bool AllocateDynamicVariable =
            KernelToCreatedDynamicLDS.contains(&Func);

        uint32_t Offset = 0;

        if (AllocateModuleScopeStruct) {
          // Allocated at zero, recorded once on construction, not once per
          // kernel
          Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
        }

        if (AllocateKernelScopeStruct) {
          GlobalVariable *KernelStruct = Replacement->second.SGV;
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
          recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
          Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
        }

        // If there is dynamic allocation, the alignment needed is included in
        // the static frame size. There may be no reference to the dynamic
        // variable in the kernel itself, so without including it here, that
        // alignment padding could be missed.
        if (AllocateDynamicVariable) {
          GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
          Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
          recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
        }

        if (Offset != 0) {
          (void)TM; // TODO: Account for target maximum LDS
          std::string Buffer;
          raw_string_ostream SS{Buffer};
          SS << format("%u", Offset);

          // Instead of explicitly marking kernels that access dynamic
          // variables using special case metadata, annotate with
          // min-lds == max-lds, i.e. that there is no more space available
          // for allocating more static LDS variables. That is the right
          // condition to prevent allocating more variables which would
          // collide with the addresses assigned to dynamic variables.
          if (AllocateDynamicVariable)
            SS << format(",%u", Offset);

          Func.addFnAttr("amdgpu-lds-size", Buffer);
        }
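
        // For example, with hypothetical values: a kernel with a 56 byte
        // static frame and no dynamic LDS gets "amdgpu-lds-size"="56"; the
        // same frame followed by dynamic LDS gets "amdgpu-lds-size"="56,56",
        // encoding min == max so that nothing further can be allocated
        // statically behind it.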
      }
    }

    for (auto &GV : make_early_inc_range(M.globals()))
      if (AMDGPU::isLDSVariableToLower(GV)) {
        // TODO: Probably want to remove these from the used lists as well
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }

    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }
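
      // E.g. a 6 byte variable currently aligned to 4 is raised to align 8
      // (eligible for b64 access) and a 32 byte variable is raised to align
      // 16 (eligible for b128 access); alignments already at or above the
      // threshold are left unchanged by the std::max above.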

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and a map from
    // those variables to the corresponding ConstantExpr GEPs into it.
    // Padding variables may be introduced to meet alignment requirements. No
    // aliasing metadata is useful for these as they have no uses. They are
    // erased before return.

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument, which varies when changing how they
      // are identified, leading to spurious test breakage.
      auto Sorted = sortByName(std::vector<GlobalVariable *>(
          LDSVarsToTransform.begin(), LDSVarsToTransform.end()));

      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0
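          //
          // Worked example: CurrentOffset == 10 with DataAlignV == 8 gives
          // Rem == 2 and Padding == 6, so the next field lands at offset 16,
          // a multiple of 8 as required.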

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage,
              PoisonValue::get(ATy), "", nullptr, GlobalValue::NotThreadLocal,
              AMDGPUAS::LOCAL_ADDRESS, false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, PoisonValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }
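
  // Sketch of the result of the function above, with hypothetical variables:
  // given a 4 byte @a (align 4) and an 8 byte @b (align 8), the optimized
  // layout may order them as { @b, @a }, producing
  //   %VarName.t = type { i64, i32 }
  // and a map { @b -> gep(0, 0), @a -> gep(0, 1) }, with no padding fields
  // needed because offsets 0 and 8 already satisfy the requested alignments.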

  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      const LDSVariableReplacement &Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack: we need to insert the aliasing info in a predictable order for
    // lit tests. It would be better to have them in a stable order already,
    // ideally the same order they get allocated, which might mean an ordered
    // set container.
    auto LDSVarsToTransform = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end()));

    // Create alias.scope nodes and their lists. Each field in the new
    // structure does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }
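
    // Shape of the metadata, as a sketch: with three variables there are
    // three anonymous scopes S0, S1, S2 in one domain. The ith variable's
    // accesses get alias.scope {Si} and a noalias list naming the other two,
    // so accesses through distinct fields are known not to alias each other.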

    // Replace uses of the ith variable with a ConstantExpr GEP to the
    // corresponding field of the instance that will be allocated by
    // AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP.at(GV);

      GV->replaceUsesWithIf(GEP, Predicate);

      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }

  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // Atomicrmw operations cannot currently take a pointer as the value
        // operand, so Ptr can only appear as the pointer operand; check
        // anyway in case that changes or we are processing a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
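
      // Worked example for the GEP case above: from a pointer known to be 16
      // byte aligned, a constant offset of 4 yields commonAlignment(16, 4) ==
      // 4, while a non-constant offset leaves GA at its default of 1 so no
      // access is over-aligned.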
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

class AMDGPULowerModuleLDSLegacy : public ModulePass {
public:
  const AMDGPUTargetMachine *TM;
  static char ID;

  AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine *TM_ = nullptr)
      : ModulePass(ID), TM(TM_) {
    initializeAMDGPULowerModuleLDSLegacyPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (!TM)
      AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    if (!TM) {
      auto &TPC = getAnalysis<TargetPassConfig>();
      TM = &TPC.getTM<AMDGPUTargetMachine>();
    }

    return AMDGPULowerModuleLDS(*TM).runOnModule(M);
  }
};

} // namespace
char AMDGPULowerModuleLDSLegacy::ID = 0;

char &llvm::AMDGPULowerModuleLDSLegacyPassID = AMDGPULowerModuleLDSLegacy::ID;

INITIALIZE_PASS_BEGIN(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                      "Lower uses of LDS variables from non-kernel functions",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                    "Lower uses of LDS variables from non-kernel functions",
                    false, false)

ModulePass *
llvm::createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPULowerModuleLDSLegacy(TM);
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS(TM).runOnModule(M) ? PreservedAnalyses::none()
                                                 : PreservedAnalyses::all();
}