//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
//
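// As a rough illustration (hypothetical variables, not taken from a real test
// case): given two LDS globals that are used from a non-kernel function,
//
//   @a = internal addrspace(3) global i32 undef, align 4
//   @b = internal addrspace(3) global i16 undef, align 2
//
// the pass builds a struct type combining them and a single instance of it,
//
//   %llvm.amdgcn.module.lds.t = type { i32, i16 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 4
//
// then rewrites uses of @a and @b as constant GEPs to fields 0 and 1 of that
// instance.
//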
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis to determine whether a variable
// is only used by a kernel is cheap and conservative, so this may allocate
// a variable in every kernel when it was not strictly necessary to do so.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
// NOTE: This pass packs the qualifying LDS variables (which may add up to a
// large amount of LDS) into a single struct type, and an instance of that
// struct is allocated in every kernel. Hence, before running this pass, it is
// advisable to run the "amdgpu-replace-lds-use-with-pointer" pass, which
// replaces LDS uses within non-kernel functions by pointers and thereby
// minimizes the unnecessary per-kernel allocation of LDS memory.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPULDSUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {

class AMDGPULowerModuleLDS : public ModulePass {

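  // Remove the entries in ToRemove from the initializer of the global named
  // Name (llvm.used or llvm.compiler.used), rebuilding the global with the
  // remaining entries, or dropping it entirely if none remain.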
  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (size_t I = 0; I < LocalVars.size(); I++) {
      if (Constant *C = dyn_cast<Constant>(LocalVars[I]->stripPointerCasts())) {
        LocalVarsSet.insert(C);
      }
    }
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is reified as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.
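    //
    // As an illustrative sketch only (exact operand types depend on how the
    // GEP below constant-folds), the emitted marker looks roughly like:
    //   call void @llvm.donothing() [ "ExplicitUse"(... @llvm.amdgcn.module.lds) ]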

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

private:
  SmallPtrSet<GlobalValue *, 32> UsedList;

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    UsedList = AMDGPU::getUsedList(M);

    bool Changed = processUsedLDS(M);

    for (Function &F : M.functions()) {
      if (!AMDGPU::isKernelCC(&F))
        continue;
      Changed |= processUsedLDS(M, &F);
    }

    UsedList.clear();
    return Changed;
  }

private:
  bool processUsedLDS(Module &M, Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Find variables to move into new struct instance
    std::vector<GlobalVariable *> FoundLocalVars =
        AMDGPU::findVariablesToLower(M, F);

    if (FoundLocalVars.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    // Increase the alignment of LDS globals if necessary to maximise the chance
    // that we can use aligned LDS instructions to access them.
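    // For example (purely illustrative): a 12 byte LDS array would be raised
    // to Align(16) here so that b96/b128 style accesses remain possible once
    // the variables are packed together.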
    if (SuperAlignLDSGlobals) {
      for (auto *GV : FoundLocalVars) {
        Align Alignment = AMDGPU::getAlign(DL, GV);
        TypeSize GVSize = DL.getTypeAllocSize(GV->getValueType());

        if (GVSize > 8) {
          // We might want to use a b96 or b128 load/store
          Alignment = std::max(Alignment, Align(16));
        } else if (GVSize > 4) {
          // We might want to use a b64 load/store
          Alignment = std::max(Alignment, Align(8));
        } else if (GVSize > 2) {
          // We might want to use a b32 load/store
          Alignment = std::max(Alignment, Align(4));
        } else if (GVSize > 1) {
          // We might want to use a b16 load/store
          Alignment = std::max(Alignment, Align(2));
        }

        GV->setAlignment(Alignment);
      }
    }

    // Sort by alignment, descending, to minimise padding.
    // On ties, sort by size, descending, then by name, lexicographical.
    llvm::stable_sort(
        FoundLocalVars,
        [&](const GlobalVariable *LHS, const GlobalVariable *RHS) -> bool {
          Align ALHS = AMDGPU::getAlign(DL, LHS);
          Align ARHS = AMDGPU::getAlign(DL, RHS);
          if (ALHS != ARHS) {
            return ALHS > ARHS;
          }

          TypeSize SLHS = DL.getTypeAllocSize(LHS->getValueType());
          TypeSize SRHS = DL.getTypeAllocSize(RHS->getValueType());
          if (SLHS != SRHS) {
            return SLHS > SRHS;
          }

          // By variable name on tie for predictable order in test cases.
          return LHS->getName() < RHS->getName();
        });

    std::vector<GlobalVariable *> LocalVars;
    LocalVars.reserve(FoundLocalVars.size()); // will be at least this large
    {
      // This usually won't need to insert any padding, perhaps avoid the alloc
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < FoundLocalVars.size(); I++) {
        GlobalVariable *FGV = FoundLocalVars[I];
        Align DataAlign = AMDGPU::getAlign(DL, FGV);

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        CurrentOffset += DL.getTypeAllocSize(FGV->getValueType());
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
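    // e.g. "llvm.amdgcn.kernel.foo.lds" when lowering for a kernel named foo,
    // or "llvm.amdgcn.module.lds" for the module scope instance.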
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align MaxAlign =
        AMDGPU::getAlign(DL, LocalVars[0]); // was sorted on alignment

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(MaxAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    removeFromUsedLists(M, LocalVars);

    // Replace uses of ith variable with a constantexpr to the ith field of the
    // instance that will be allocated by AMDGPUMachineFunction
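    // For instance (illustrative), a use of the I'th packed variable becomes
    //   getelementptr inbounds (%llvm.amdgcn.module.lds.t,
    //     %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds,
    //     i32 0, i32 I)
    // (or the equivalent per-kernel instance when F is set).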
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty()) {
        UsedList.erase(GV);
        GV->eraseFromParent();
      }
    }

    // Mark kernels with a use that reads the address of the allocated
    // structure. This is not necessary for lowering. This lets other passes,
    // specifically PromoteAlloca, accurately calculate how much LDS will be
    // used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      SmallPtrSet<Function *, 32> Kernels;
      for (auto &I : M.functions()) {
        Function *Func = &I;
        if (AMDGPU::isKernelCC(Func) && !Kernels.contains(Func)) {
          markUsedByKernel(Builder, Func, SGV);
          Kernels.insert(Func);
        }
      }
    }
    return true;
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}