//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile-time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
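//
// For illustration (a hypothetical sketch; the actual field order is chosen
// by performOptimizedStructLayout below), two variables such as
//   @a = internal addrspace(3) global i32 undef
//   @b = internal addrspace(3) global i64 undef
// may be combined into a single instance
//   %llvm.amdgcn.module.lds.t = type { i64, i32 }
//   @llvm.amdgcn.module.lds = internal addrspace(3) global
//       %llvm.amdgcn.module.lds.t undef, align 8
// with uses of @a and @b rewritten to constant GEPs into that instance.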
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis to determine whether a variable
// is only used by a kernel is cheap and conservative, so this may allocate
// a variable in every kernel when it was not strictly necessary to do so.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPULDSUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {

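// Collect the union of the entries in llvm.used and llvm.compiler.used.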
SmallPtrSet<GlobalValue *, 32> getUsedList(Module &M) {
  SmallPtrSet<GlobalValue *, 32> UsedList;

  SmallVector<GlobalValue *, 32> TmpVec;
  collectUsedGlobalVariables(M, TmpVec, true);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  TmpVec.clear();
  collectUsedGlobalVariables(M, TmpVec, false);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  return UsedList;
}

class AMDGPULowerModuleLDS : public ModulePass {

  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (size_t I = 0; I < LocalVars.size(); I++) {
      if (Constant *C = dyn_cast<Constant>(LocalVars[I]->stripPointerCasts())) {
        LocalVarsSet.insert(C);
      }
    }
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.
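    //
    // The call emitted below looks roughly like this (an illustrative sketch;
    // the bundle operand is the GEP to the struct instance created earlier):
    //   call void @llvm.donothing() [ "ExplicitUse"(
    //       %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds) ]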

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

private:
  SmallPtrSet<GlobalValue *, 32> UsedList;

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    UsedList = getUsedList(M);

    bool Changed = processUsedLDS(M);

    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;
      Changed |= processUsedLDS(M, &F);
    }

    UsedList.clear();
    return Changed;
  }

private:
  bool processUsedLDS(Module &M, Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Find variables to move into new struct instance
    std::vector<GlobalVariable *> FoundLocalVars =
        AMDGPU::findVariablesToLower(M, F);

    if (FoundLocalVars.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    // Increase the alignment of LDS globals if necessary to maximise the chance
    // that we can use aligned LDS instructions to access them.
    if (SuperAlignLDSGlobals) {
      for (auto *GV : FoundLocalVars) {
        Align Alignment = AMDGPU::getAlign(DL, GV);
        TypeSize GVSize = DL.getTypeAllocSize(GV->getValueType());

        if (GVSize > 8) {
          // We might want to use a b96 or b128 load/store
          Alignment = std::max(Alignment, Align(16));
        } else if (GVSize > 4) {
          // We might want to use a b64 load/store
          Alignment = std::max(Alignment, Align(8));
        } else if (GVSize > 2) {
          // We might want to use a b32 load/store
          Alignment = std::max(Alignment, Align(4));
        } else if (GVSize > 1) {
          // We might want to use a b16 load/store
          Alignment = std::max(Alignment, Align(2));
        }

        GV->setAlignment(Alignment);
      }
    }

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(FoundLocalVars.size());
    for (GlobalVariable *GV : FoundLocalVars) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    LocalVars.reserve(FoundLocalVars.size()); // will be at least this large
    {
      // This usually won't need to insert any padding, perhaps avoid the alloc
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet the requested alignment.
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0
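          // For instance, CurrentOffset == 6 with DataAlignV == 4 gives
          // Padding == 2, so the next field starts at offset 8.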

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant,
    // so remove the variables from these lists before replaceAllUsesWith.
    removeFromUsedLists(M, LocalVars);

    // Create alias.scope metadata and the corresponding lists: each field in
    // the new structure does not alias any other field.
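    // Illustratively, the metadata built below has this shape:
    //   !0 = distinct !{!0}       ; anonymous domain
    //   !1 = distinct !{!1, !0}   ; scope for field 0
    //   !2 = distinct !{!2, !0}   ; scope for field 1, and so on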
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    if (LocalVars.size() > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(LocalVars.size());
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < LocalVars.size(); I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of the ith variable with a constantexpr GEP to the ith
    // field of the instance that will be allocated by AMDGPUMachineFunction.
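    // For example (illustrative types and names), a use of the second variable
    // is rewritten to:
    //   getelementptr (%llvm.amdgcn.module.lds.t,
    //       %llvm.amdgcn.module.lds.t addrspace(3)* @llvm.amdgcn.module.lds,
    //       i32 0, i32 1)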
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty()) {
        UsedList.erase(GV);
        GV->eraseFromParent();
      }

      uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
      Align A = commonAlignment(StructAlign, Off);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }

    // Mark kernels with a call that reads the address of the allocated
    // structure. This is not necessary for lowering, but it lets other passes,
    // specifically PromoteAlloca, accurately calculate how much LDS will be
    // used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      SmallPtrSet<Function *, 32> Kernels;
      for (Function &Func : M.functions()) {
        if (Func.isDeclaration())
          continue;

        if (AMDGPU::isKernelCC(&Func) && !Kernels.contains(&Func)) {
          markUsedByKernel(Builder, &Func, SGV);
          Kernels.insert(&Func);
        }
      }
    }
    return true;
  }

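  // Propagate the alignment implied by the struct layout, together with
  // alias.scope/noalias metadata, to the transitive users of Ptr, recursing
  // through GEPs and casts up to MaxDepth levels.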
  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations can work on pointers, but check
        // anyway in case that changes, or we reach here via a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}