//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis to determine whether a variable
// is only used by a kernel is cheap and conservative, so this may allocate
// a variable in every kernel when it was not strictly necessary to do so.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
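// As an illustrative sketch with hypothetical variable names, LDS globals
// reachable from non-kernel functions such as
//
//   @a = internal addrspace(3) global i32 undef
//   @b = internal addrspace(3) global [2 x i16] undef
//
// are gathered into a single struct instance
//
//   %llvm.amdgcn.module.lds.t = type { i32, [2 x i16] }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 4
//
// and each use of @a or @b is rewritten to a constant getelementptr into the
// corresponding field of @llvm.amdgcn.module.lds, whose address is known.
//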
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPULDSUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {

SmallPtrSet<GlobalValue *, 32> getUsedList(Module &M) {
  SmallPtrSet<GlobalValue *, 32> UsedList;

  SmallVector<GlobalValue *, 32> TmpVec;
  collectUsedGlobalVariables(M, TmpVec, true);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  TmpVec.clear();
  collectUsedGlobalVariables(M, TmpVec, false);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  return UsedList;
}

class AMDGPULowerModuleLDS : public ModulePass {

  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
        LocalVarsSet.insert(C);
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.
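    //
    // Roughly, assuming the module-scope instance is named
    // @llvm.amdgcn.module.lds (a sketch, not verbatim output), the use emitted
    // at the top of each kernel looks like:
    //
    //   call void @llvm.donothing()
    //       [ "ExplicitUse"(%llvm.amdgcn.module.lds.t addrspace(3)*
    //                           @llvm.amdgcn.module.lds) ]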

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

private:
  SmallPtrSet<GlobalValue *, 32> UsedList;

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    UsedList = getUsedList(M);

    bool Changed = processUsedLDS(M);

    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;
      Changed |= processUsedLDS(M, &F);
    }

    UsedList.clear();
    return Changed;
  }

private:
  bool processUsedLDS(Module &M, Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Find variables to move into new struct instance
    std::vector<GlobalVariable *> FoundLocalVars =
        AMDGPU::findVariablesToLower(M, F);

    if (FoundLocalVars.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    // Increase the alignment of LDS globals if necessary to maximise the chance
    // that we can use aligned LDS instructions to access them.
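    // For example, a 12 byte variable would have its alignment raised to at
    // least 16 so that a b96 or b128 access is possible, and a 6 byte variable
    // to at least 8 for a b64 access.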
    if (SuperAlignLDSGlobals) {
      for (auto *GV : FoundLocalVars) {
        Align Alignment = AMDGPU::getAlign(DL, GV);
        TypeSize GVSize = DL.getTypeAllocSize(GV->getValueType());

        if (GVSize > 8) {
          // We might want to use a b96 or b128 load/store
          Alignment = std::max(Alignment, Align(16));
        } else if (GVSize > 4) {
          // We might want to use a b64 load/store
          Alignment = std::max(Alignment, Align(8));
        } else if (GVSize > 2) {
          // We might want to use a b32 load/store
          Alignment = std::max(Alignment, Align(4));
        } else if (GVSize > 1) {
          // We might want to use a b16 load/store
          Alignment = std::max(Alignment, Align(2));
        }

        GV->setAlignment(Alignment);
      }
    }

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(FoundLocalVars.size());
    for (GlobalVariable *GV : FoundLocalVars) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    LocalVars.reserve(FoundLocalVars.size()); // will be at least this large
    {
      // This usually won't need to insert any padding, perhaps avoid the alloc
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0
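          //
          // For example, with CurrentOffset == 6 and an alignment of 4:
          //   Rem == 2, Padding == 2, and the next field starts at offset 8.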

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant
    // so remove the variables from these lists before replaceAllUsesWith
    removeFromUsedLists(M, LocalVars);

    // Create alias.scope and their lists. Each field in the new structure
    // does not alias with any other field.
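    //
    // As an illustrative sketch with three fields (the !scopeN names are
    // placeholders), accesses through field 1's pointer would carry
    //
    //   !alias.scope !{!scope1}   and   !noalias !{!scope0, !scope2}
    //
    // i.e. each field gets its own scope and is declared noalias with the
    // scopes of every other field.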
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    if (LocalVars.size() > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(LocalVars.size());
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < LocalVars.size(); I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of ith variable with a constantexpr to the ith field of the
    // instance that will be allocated by AMDGPUMachineFunction
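    //
    // For instance (a sketch with hypothetical names), a use of a variable
    // that became field 1 of the module-scope instance is replaced by
    //
    //   getelementptr inbounds (%llvm.amdgcn.module.lds.t,
    //       %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds,
    //       i32 0, i32 1)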
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty()) {
        UsedList.erase(GV);
        GV->eraseFromParent();
      }

      uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
      Align A = commonAlignment(StructAlign, Off);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }

    // This ensures the variable is allocated when called functions access it.
    // It also lets other passes, specifically PromoteAlloca, accurately
    // calculate how much LDS will be used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      for (Function &Func : M.functions()) {
        if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
          markUsedByKernel(Builder, &Func, SGV);
        }
      }
    }
    return true;
  }

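  // Propagate the alignment known for Ptr, together with the alias.scope and
  // noalias metadata, onto its users. Alignment is applied to loads, stores
  // and atomics whose pointer operand is Ptr; the walk recurses through geps,
  // bitcasts and addrspacecasts up to MaxDepth levels.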
  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations can work on pointers, but check it
        // anyway in case that changes or we process a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}
445