//===-- AMDGPULowerKernelArguments.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Set the initial number of user SGPRs that remain free to preload
  // arguments into.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
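    // For example, an 8-byte argument at ArgOffset 40 whose predecessor ended
    // at LastExplicitArgOffset 36 needs one padding SGPR for the 4 bytes of
    // alignment padding plus two SGPRs for the argument itself.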
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip past the leading static allocas when choosing the insertion point for
// the kernarg loads.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded kernargs,
    // so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&*getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();
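  // BaseOffset is the distance from the kernarg segment base pointer to the
  // first explicit argument; it is folded into every EltOffset computed below.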

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
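  // The intrinsic call materializes the kernarg segment base pointer; marking
  // it nonnull and dereferenceable for the whole segment lets later passes
  // reason about the loads created below.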

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
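    // For example, an i32 that follows a single i8 sees ExplicitArgOffset == 1;
    // it is aligned up to 4, so EltOffset is BaseOffset + 4 and
    // ExplicitArgOffset advances to 8.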

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !ST.needsKernargPreloadBackwardsCompatibility() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

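    // Reaching this point means this argument was not preloaded; since
    // preloaded arguments must form a contiguous leading run, stop trying for
    // all later arguments.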
    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is byref, the loads are already explicit in the function. We
    // just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
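      //
      // For example, an i16 argument at EltOffset 2 is loaded as an i32 from
      // AlignDownOffset 0; OffsetDiff is 2, so the lshr/trunc sequence below
      // extracts bits [31:16] of the dword.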
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }
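    // A 3-element vector such as <3 x i32> is therefore loaded as <4 x i32>
    // and narrowed back to three elements by the shufflevector below.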

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

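    // For pointer arguments, carry the argument attributes over onto the load
    // as metadata so that nonnull, dereferenceable and alignment information
    // survives lowering.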
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }
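  // Mark the kernarg segment pointer as aligned to at least the base
  // alignment and the largest per-argument alignment seen above.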
  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserve a lot more than just the CFG analyses.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}
317