1 //===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Eliminates allocas by either converting them into vectors or by migrating
10 // them to local address space.
11 //
12 // Two passes are exposed by this file:
13 //    - "promote-alloca-to-vector", which runs early in the pipeline and only
14 //      promotes to vector. Promotion to vector is almost always profitable
15 //      except when the alloca is too big and the promotion would result in
16 //      very high register pressure.
17 //    - "promote-alloca", which does both promotion to vector and LDS and runs
18 //      much later in the pipeline. This runs after SROA because promoting to
19 //      LDS is of course less profitable than getting rid of the alloca or
20 //      vectorizing it, thus we only want to do it when the only alternative is
21 //      lowering the alloca to stack.
22 //
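// As an illustrative example (not from a real test), a kernel-local
//   %stack = alloca [4 x i32], align 4, addrspace(5)
// whose uses are loads and stores through constant GEP indices can be
// rewritten by the vector path to keep the four values in a <4 x i32> carried
// in SSA form (and ultimately in VGPRs), while the LDS path instead replaces
// it with a per-workitem slice of a module-scope addrspace(3) array.
//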
23 // Note that both of them exist for the old and new PMs. The new PM passes are
24 // declared in AMDGPU.h and the legacy PM ones are declared here.
25 //
26 //===----------------------------------------------------------------------===//
27 
28 #include "AMDGPU.h"
29 #include "GCNSubtarget.h"
30 #include "Utils/AMDGPUBaseInfo.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/Analysis/CaptureTracking.h"
33 #include "llvm/Analysis/InstSimplifyFolder.h"
34 #include "llvm/Analysis/InstructionSimplify.h"
35 #include "llvm/Analysis/LoopInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/CodeGen/TargetPassConfig.h"
38 #include "llvm/IR/IRBuilder.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/IntrinsicsAMDGPU.h"
41 #include "llvm/IR/IntrinsicsR600.h"
42 #include "llvm/IR/PatternMatch.h"
43 #include "llvm/InitializePasses.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Target/TargetMachine.h"
46 #include "llvm/Transforms/Utils/SSAUpdater.h"
47 
48 #define DEBUG_TYPE "amdgpu-promote-alloca"
49 
50 using namespace llvm;
51 
52 namespace {
53 
54 static cl::opt<bool>
55     DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
56                                  cl::desc("Disable promote alloca to vector"),
57                                  cl::init(false));
58 
59 static cl::opt<bool>
60     DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
61                               cl::desc("Disable promote alloca to LDS"),
62                               cl::init(false));
63 
64 static cl::opt<unsigned> PromoteAllocaToVectorLimit(
65     "amdgpu-promote-alloca-to-vector-limit",
66     cl::desc("Maximum byte size to consider promote alloca to vector"),
67     cl::init(0));
68 
69 static cl::opt<unsigned>
70     LoopUserWeight("promote-alloca-vector-loop-user-weight",
71                    cl::desc("The bonus weight of users of allocas within loop "
72                             "when sorting profitable allocas"),
73                    cl::init(4));
74 
75 // Shared implementation which can do both promotion to vector and to LDS.
76 class AMDGPUPromoteAllocaImpl {
77 private:
78   const TargetMachine &TM;
79   LoopInfo &LI;
80   Module *Mod = nullptr;
81   const DataLayout *DL = nullptr;
82 
83   // FIXME: This should be per-kernel.
84   uint32_t LocalMemLimit = 0;
85   uint32_t CurrentLocalMemUsage = 0;
86   unsigned MaxVGPRs;
87 
88   bool IsAMDGCN = false;
89   bool IsAMDHSA = false;
90 
91   std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
92   Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
93 
94   /// BaseAlloca is the alloca root the search started from.
95   /// Val may be that alloca or a recursive user of it.
96   bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
97                                std::vector<Value *> &WorkList) const;
98 
99   /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
100   /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
101   /// Returns true if both operands are derived from the same alloca. Val should
102   /// be the same value as one of the input operands of UseInst.
103   bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
104                                        Instruction *UseInst, int OpIdx0,
105                                        int OpIdx1) const;
106 
107   /// Check whether we have enough local memory for promotion.
108   bool hasSufficientLocalMem(const Function &F);
109 
110   bool tryPromoteAllocaToVector(AllocaInst &I);
111   bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
112 
113   void sortAllocasToPromote(SmallVectorImpl<AllocaInst *> &Allocas);
114 
115 public:
116   AMDGPUPromoteAllocaImpl(TargetMachine &TM, LoopInfo &LI) : TM(TM), LI(LI) {
117 
118     const Triple &TT = TM.getTargetTriple();
119     IsAMDGCN = TT.getArch() == Triple::amdgcn;
120     IsAMDHSA = TT.getOS() == Triple::AMDHSA;
121   }
122 
123   bool run(Function &F, bool PromoteToLDS);
124 };
125 
126 // FIXME: This can create globals so should be a module pass.
127 class AMDGPUPromoteAlloca : public FunctionPass {
128 public:
129   static char ID;
130 
131   AMDGPUPromoteAlloca() : FunctionPass(ID) {}
132 
133   bool runOnFunction(Function &F) override {
134     if (skipFunction(F))
135       return false;
136     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
137       return AMDGPUPromoteAllocaImpl(
138                  TPC->getTM<TargetMachine>(),
139                  getAnalysis<LoopInfoWrapperPass>().getLoopInfo())
140           .run(F, /*PromoteToLDS*/ true);
141     return false;
142   }
143 
144   StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
145 
146   void getAnalysisUsage(AnalysisUsage &AU) const override {
147     AU.setPreservesCFG();
148     AU.addRequired<LoopInfoWrapperPass>();
149     FunctionPass::getAnalysisUsage(AU);
150   }
151 };
152 
153 class AMDGPUPromoteAllocaToVector : public FunctionPass {
154 public:
155   static char ID;
156 
157   AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
158 
159   bool runOnFunction(Function &F) override {
160     if (skipFunction(F))
161       return false;
162     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
163       return AMDGPUPromoteAllocaImpl(
164                  TPC->getTM<TargetMachine>(),
165                  getAnalysis<LoopInfoWrapperPass>().getLoopInfo())
166           .run(F, /*PromoteToLDS*/ false);
167     return false;
168   }
169 
170   StringRef getPassName() const override {
171     return "AMDGPU Promote Alloca to vector";
172   }
173 
174   void getAnalysisUsage(AnalysisUsage &AU) const override {
175     AU.setPreservesCFG();
176     AU.addRequired<LoopInfoWrapperPass>();
177     FunctionPass::getAnalysisUsage(AU);
178   }
179 };
180 
181 unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
182   if (!TM.getTargetTriple().isAMDGCN())
183     return 128;
184 
185   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
186   unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
187 
188   // A non-entry function has only 32 caller-preserved registers.
189   // Do not promote an alloca that would force spilling unless we know the
190   // function will be inlined.
191   if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
192       !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
193     MaxVGPRs = std::min(MaxVGPRs, 32u);
194   return MaxVGPRs;
195 }
196 
197 } // end anonymous namespace
198 
199 char AMDGPUPromoteAlloca::ID = 0;
200 char AMDGPUPromoteAllocaToVector::ID = 0;
201 
202 INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
203                       "AMDGPU promote alloca to vector or LDS", false, false)
204 // Move LDS uses from functions to kernels before promote alloca so that the
205 // amount of available LDS can be estimated accurately.
206 INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
207 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
208 INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
209                     "AMDGPU promote alloca to vector or LDS", false, false)
210 
211 INITIALIZE_PASS_BEGIN(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
212                       "AMDGPU promote alloca to vector", false, false)
213 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
214 INITIALIZE_PASS_END(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
215                     "AMDGPU promote alloca to vector", false, false)
216 
217 char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
218 char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
219 
220 PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
221                                                FunctionAnalysisManager &AM) {
222   auto &LI = AM.getResult<LoopAnalysis>(F);
223   bool Changed = AMDGPUPromoteAllocaImpl(TM, LI).run(F, /*PromoteToLDS=*/true);
224   if (Changed) {
225     PreservedAnalyses PA;
226     PA.preserveSet<CFGAnalyses>();
227     return PA;
228   }
229   return PreservedAnalyses::all();
230 }
231 
232 PreservedAnalyses
233 AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
234   auto &LI = AM.getResult<LoopAnalysis>(F);
235   bool Changed = AMDGPUPromoteAllocaImpl(TM, LI).run(F, /*PromoteToLDS=*/false);
236   if (Changed) {
237     PreservedAnalyses PA;
238     PA.preserveSet<CFGAnalyses>();
239     return PA;
240   }
241   return PreservedAnalyses::all();
242 }
243 
244 FunctionPass *llvm::createAMDGPUPromoteAlloca() {
245   return new AMDGPUPromoteAlloca();
246 }
247 
248 FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
249   return new AMDGPUPromoteAllocaToVector();
250 }
251 
252 static void collectAllocaUses(AllocaInst &Alloca,
253                               SmallVectorImpl<Use *> &Uses) {
254   SmallVector<Instruction *, 4> WorkList({&Alloca});
255   while (!WorkList.empty()) {
256     auto *Cur = WorkList.pop_back_val();
257     for (auto &U : Cur->uses()) {
258       Uses.push_back(&U);
259 
260       if (isa<GetElementPtrInst>(U.getUser()))
261         WorkList.push_back(cast<Instruction>(U.getUser()));
262     }
263   }
264 }
265 
266 void AMDGPUPromoteAllocaImpl::sortAllocasToPromote(
267     SmallVectorImpl<AllocaInst *> &Allocas) {
268   DenseMap<AllocaInst *, unsigned> Scores;
269 
270   for (auto *Alloca : Allocas) {
271     LLVM_DEBUG(dbgs() << "Scoring: " << *Alloca << "\n");
272     unsigned &Score = Scores[Alloca];
273     // Increment score by one for each user + a bonus for users within loops.
274     SmallVector<Use *, 8> Uses;
275     collectAllocaUses(*Alloca, Uses);
276     for (auto *U : Uses) {
277       Instruction *Inst = cast<Instruction>(U->getUser());
278       if (isa<GetElementPtrInst>(Inst))
279         continue;
280       unsigned UserScore =
281           1 + (LoopUserWeight * LI.getLoopDepth(Inst->getParent()));
282       LLVM_DEBUG(dbgs() << "  [+" << UserScore << "]:\t" << *Inst << "\n");
283       Score += UserScore;
284     }
285     LLVM_DEBUG(dbgs() << "  => Final Score:" << Score << "\n");
286   }
287 
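  // With the default LoopUserWeight of 4, a user at loop depth 2 contributes
  // 1 + 4 * 2 = 9 to its alloca's score while a user outside any loop
  // contributes 1 (illustrative numbers; GEPs themselves are skipped because
  // their users are collected and scored instead).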
288   stable_sort(Allocas, [&](AllocaInst *A, AllocaInst *B) {
289     return Scores.at(A) > Scores.at(B);
290   });
291 
292   // clang-format off
293   LLVM_DEBUG(
294     dbgs() << "Sorted Worklist:\n";
295     for (auto *A: Allocas)
296       dbgs() << "  " << *A << "\n";
297   );
298   // clang-format on
299 }
300 
301 bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
302   Mod = F.getParent();
303   DL = &Mod->getDataLayout();
304 
305   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
306   if (!ST.isPromoteAllocaEnabled())
307     return false;
308 
309   MaxVGPRs = getMaxVGPRs(TM, F);
310 
311   bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;
312 
313   // Use up to 1/4 of available register budget for vectorization.
314   // FIXME: Increase the limit for whole function budgets? Perhaps x2?
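  // For example, with MaxVGPRs == 256 (32 bits each) the budget is
  // (256 * 32) / 4 = 2048 bits, i.e. enough for four <16 x i32> allocas
  // (illustrative numbers; AllocaCost below is also measured in bits).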
315   unsigned VectorizationBudget =
316       (PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
317                                   : (MaxVGPRs * 32)) /
318       4;
319 
320   SmallVector<AllocaInst *, 16> Allocas;
321   for (Instruction &I : F.getEntryBlock()) {
322     if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
323       // Array allocations are probably not worth handling, since an allocation
324       // of the array type is the canonical form.
325       if (!AI->isStaticAlloca() || AI->isArrayAllocation())
326         continue;
327       Allocas.push_back(AI);
328     }
329   }
330 
331   sortAllocasToPromote(Allocas);
332 
333   bool Changed = false;
334   for (AllocaInst *AI : Allocas) {
335     const unsigned AllocaCost = DL->getTypeSizeInBits(AI->getAllocatedType());
336     // First, check if we have enough budget to vectorize this alloca.
337     if (AllocaCost <= VectorizationBudget) {
338       // If we do, attempt vectorization, otherwise, fall through and try
339       // promoting to LDS instead.
340       if (tryPromoteAllocaToVector(*AI)) {
341         Changed = true;
342         assert((VectorizationBudget - AllocaCost) < VectorizationBudget &&
343                "Underflow!");
344         VectorizationBudget -= AllocaCost;
345         LLVM_DEBUG(dbgs() << "  Remaining vectorization budget:"
346                           << VectorizationBudget << "\n");
347         continue;
348       }
349     } else {
350       LLVM_DEBUG(dbgs() << "Alloca too big for vectorization (size:"
351                         << AllocaCost << ", budget:" << VectorizationBudget
352                         << "): " << *AI << "\n");
353     }
354 
355     if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
356       Changed = true;
357   }
358 
359   // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
360   // dangling pointers. If we want to reuse it past this point, the loop above
361   // would need to be updated to remove successfully promoted allocas.
362 
363   return Changed;
364 }
365 
366 struct MemTransferInfo {
367   ConstantInt *SrcIndex = nullptr;
368   ConstantInt *DestIndex = nullptr;
369 };
370 
371 // Checks if the instruction I is a memset user of the alloca AI that we can
372 // deal with. Currently, only non-volatile memsets that affect the whole alloca
373 // are handled.
374 static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
375                               const DataLayout &DL) {
376   using namespace PatternMatch;
377   // For now we only care about non-volatile memsets that affect the whole type
378   // (start at index 0 and fill the whole alloca).
379   //
380   // TODO: Now that we moved to PromoteAlloca we could handle any memsets
381   // (except maybe volatile ones?) - we just need to use shufflevector if it
382   // only affects a subset of the vector.
383   const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
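  // Operand 0 of the memset is the destination pointer and operand 2 is the
  // length in bytes, so this accepts the equivalent of
  // "memset(%alloca, <any value>, sizeof(alloca))" with the volatile bit clear.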
384   return I->getOperand(0) == AI &&
385          match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
386 }
387 
388 static Value *
389 calculateVectorIndex(Value *Ptr,
390                      const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
391   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
392   if (!GEP)
393     return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
394 
395   auto I = GEPIdx.find(GEP);
396   assert(I != GEPIdx.end() && "Must have entry for GEP!");
397   return I->second;
398 }
399 
400 static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
401                                Type *VecElemTy, const DataLayout &DL) {
402   // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
403   // helper.
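  // Illustrative examples, assuming VecElemTy is i32 (4 bytes):
  //   getelementptr i8, ptr %alloca, i64 12   -> constant index 3
  //   getelementptr i32, ptr %alloca, i64 %i  -> variable index %i
  //   getelementptr i8, ptr %alloca, i64 %i   -> rejected, the variable offset
  //                                              is not scaled by 4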
404   unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
405   SmallMapVector<Value *, APInt, 4> VarOffsets;
406   APInt ConstOffset(BW, 0);
407   if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
408       !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
409     return nullptr;
410 
411   unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
412   if (VarOffsets.size() > 1)
413     return nullptr;
414 
415   if (VarOffsets.size() == 1) {
416     // Only handle cases where we don't need to insert extra arithmetic
417     // instructions.
418     const auto &VarOffset = VarOffsets.front();
419     if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
420       return nullptr;
421     return VarOffset.first;
422   }
423 
424   APInt Quot;
425   uint64_t Rem;
426   APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
427   if (Rem != 0)
428     return nullptr;
429 
430   return ConstantInt::get(GEP->getContext(), Quot);
431 }
432 
433 /// Promotes a single user of the alloca to a vector form.
434 ///
435 /// \param Inst           Instruction to be promoted.
436 /// \param DL             Module Data Layout.
437 /// \param VectorTy       Vectorized Type.
438 /// \param VecStoreSize   Size of \p VectorTy in bytes.
439 /// \param ElementSize    Size of \p VectorTy element type in bytes.
440 /// \param TransferInfo   MemTransferInst info map.
441 /// \param GEPVectorIdx   GEP -> VectorIdx cache.
442 /// \param CurVal         Current value of the vector (e.g. last stored value)
443 /// \param[out]  DeferredLoads \p Inst is added to this vector if it can't
444 ///              be promoted now. This happens when promoting requires \p
445 ///              CurVal, but \p CurVal is nullptr.
446 /// \return the stored value if \p Inst would have written to the alloca, or
447 ///         nullptr otherwise.
448 static Value *promoteAllocaUserToVector(
449     Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
450     unsigned VecStoreSize, unsigned ElementSize,
451     DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
452     std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
453     SmallVectorImpl<LoadInst *> &DeferredLoads) {
454   // Note: we use InstSimplifyFolder because it can leverage the DataLayout
455   // to do more folding, especially in the case of vector splats.
456   IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
457                                         InstSimplifyFolder(DL));
458   Builder.SetInsertPoint(Inst);
459 
460   const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
461     if (CurVal)
462       return CurVal;
463 
464     // If the current value is not known, insert a dummy load and lower it on
465     // the second pass.
466     LoadInst *Dummy =
467         Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
468                            "promotealloca.dummyload");
469     DeferredLoads.push_back(Dummy);
470     return Dummy;
471   };
472 
473   const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
474                                                    Type *PtrTy) -> Value * {
475     assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
476     const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
477     if (!PtrTy->isVectorTy())
478       return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
479     const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
480     // If we want to cast, e.g. a <2 x ptr> into a <4 x i32>, we need to first
481     // cast the ptr vector to <2 x i64>.
482     assert((Size % NumPtrElts == 0) && "Vector size not divisible");
483     Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
484     return Builder.CreateBitOrPointerCast(
485         Val, FixedVectorType::get(EltTy, NumPtrElts));
486   };
487 
488   Type *VecEltTy = VectorTy->getElementType();
489 
490   switch (Inst->getOpcode()) {
491   case Instruction::Load: {
492     // Loads can only be lowered if the value is known.
493     if (!CurVal) {
494       DeferredLoads.push_back(cast<LoadInst>(Inst));
495       return nullptr;
496     }
497 
498     Value *Index = calculateVectorIndex(
499         cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);
500 
501     // We're loading the full vector.
502     Type *AccessTy = Inst->getType();
503     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
504     if (Constant *CI = dyn_cast<Constant>(Index)) {
505       if (CI->isZeroValue() && AccessSize == VecStoreSize) {
506         if (AccessTy->isPtrOrPtrVectorTy())
507           CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
508         else if (CurVal->getType()->isPtrOrPtrVectorTy())
509           CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
510         Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
511         Inst->replaceAllUsesWith(NewVal);
512         return nullptr;
513       }
514     }
515 
516     // Loading a subvector.
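    // E.g. loading <2 x i32> at vector index %i from a promoted <8 x i32>
    // value becomes extractelement at %i and %i + 1 feeding an insertelement
    // chain (illustrative types).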
517     if (isa<FixedVectorType>(AccessTy)) {
518       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
519       const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
520       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
521       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
522 
523       Value *SubVec = PoisonValue::get(SubVecTy);
524       for (unsigned K = 0; K < NumLoadedElts; ++K) {
525         Value *CurIdx =
526             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
527         SubVec = Builder.CreateInsertElement(
528             SubVec, Builder.CreateExtractElement(CurVal, CurIdx), K);
529       }
530 
531       if (AccessTy->isPtrOrPtrVectorTy())
532         SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
533       else if (SubVecTy->isPtrOrPtrVectorTy())
534         SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);
535 
536       SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
537       Inst->replaceAllUsesWith(SubVec);
538       return nullptr;
539     }
540 
541     // We're loading one element.
542     Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
543     if (AccessTy != VecEltTy)
544       ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);
545 
546     Inst->replaceAllUsesWith(ExtractElement);
547     return nullptr;
548   }
549   case Instruction::Store: {
550     // For stores, it's a bit trickier and it depends on whether we're storing
551     // the full vector or not. If we're storing the full vector, we don't need
552     // to know the current value. If this is a store of a single element, we
553     // need to know the value.
554     StoreInst *SI = cast<StoreInst>(Inst);
555     Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
556     Value *Val = SI->getValueOperand();
557 
558     // We're storing the full vector, we can handle this without knowing CurVal.
559     Type *AccessTy = Val->getType();
560     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
561     if (Constant *CI = dyn_cast<Constant>(Index)) {
562       if (CI->isZeroValue() && AccessSize == VecStoreSize) {
563         if (AccessTy->isPtrOrPtrVectorTy())
564           Val = CreateTempPtrIntCast(Val, AccessTy);
565         else if (VectorTy->isPtrOrPtrVectorTy())
566           Val = CreateTempPtrIntCast(Val, VectorTy);
567         return Builder.CreateBitOrPointerCast(Val, VectorTy);
568       }
569     }
570 
571     // Storing a subvector.
572     if (isa<FixedVectorType>(AccessTy)) {
573       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
574       const unsigned NumWrittenElts =
575           AccessSize / DL.getTypeStoreSize(VecEltTy);
576       const unsigned NumVecElts = VectorTy->getNumElements();
577       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
578       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
579 
580       if (SubVecTy->isPtrOrPtrVectorTy())
581         Val = CreateTempPtrIntCast(Val, SubVecTy);
582       else if (AccessTy->isPtrOrPtrVectorTy())
583         Val = CreateTempPtrIntCast(Val, AccessTy);
584 
585       Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);
586 
587       Value *CurVec = GetOrLoadCurrentVectorValue();
588       for (unsigned K = 0, NumElts = std::min(NumWrittenElts, NumVecElts);
589            K < NumElts; ++K) {
590         Value *CurIdx =
591             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
592         CurVec = Builder.CreateInsertElement(
593             CurVec, Builder.CreateExtractElement(Val, K), CurIdx);
594       }
595       return CurVec;
596     }
597 
598     if (Val->getType() != VecEltTy)
599       Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
600     return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
601                                        Index);
602   }
603   case Instruction::Call: {
604     if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
605       // For memcpy, we need to know CurVal.
606       ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
607       unsigned NumCopied = Length->getZExtValue() / ElementSize;
608       MemTransferInfo *TI = &TransferInfo[MTI];
609       unsigned SrcBegin = TI->SrcIndex->getZExtValue();
610       unsigned DestBegin = TI->DestIndex->getZExtValue();
611 
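      // Illustrative example: for an 8-element vector with DestBegin = 4,
      // SrcBegin = 0 and NumCopied = 2, the mask is <0, 1, 2, 3, 0, 1, 6, 7>,
      // i.e. elements 4 and 5 are replaced by copies of elements 0 and 1.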
612       SmallVector<int> Mask;
613       for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
614         if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
615           Mask.push_back(SrcBegin++);
616         } else {
617           Mask.push_back(Idx);
618         }
619       }
620 
621       return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
622     }
623 
624     if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
625       // For memset, we don't need to know the previous value because we
626       // currently only allow memsets that cover the whole alloca.
627       Value *Elt = MSI->getOperand(1);
628       const unsigned BytesPerElt = DL.getTypeStoreSize(VecEltTy);
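      // E.g. for a <4 x i32> vector and an i8 memset value of 0xAB, the splat
      // below builds <4 x i8> which is then cast to a single i32 element
      // 0xABABABAB before being splatted over the whole vector (illustrative
      // values).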
629       if (BytesPerElt > 1) {
630         Value *EltBytes = Builder.CreateVectorSplat(BytesPerElt, Elt);
631 
632         // If the element type of the vector is a pointer, we need to first cast
633         // to an integer, then use a PtrCast.
634         if (VecEltTy->isPointerTy()) {
635           Type *PtrInt = Builder.getIntNTy(BytesPerElt * 8);
636           Elt = Builder.CreateBitCast(EltBytes, PtrInt);
637           Elt = Builder.CreateIntToPtr(Elt, VecEltTy);
638         } else
639           Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
640       }
641 
642       return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
643     }
644 
645     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
646       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
647         Intr->replaceAllUsesWith(
648             Builder.getIntN(Intr->getType()->getIntegerBitWidth(),
649                             DL.getTypeAllocSize(VectorTy)));
650         return nullptr;
651       }
652     }
653 
654     llvm_unreachable("Unsupported call when promoting alloca to vector");
655   }
656 
657   default:
658     llvm_unreachable("Inconsistency in instructions promotable to vector");
659   }
660 
661   llvm_unreachable("Did not return after promoting instruction!");
662 }
663 
664 static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
665                                   const DataLayout &DL) {
666   // Access as a vector type can work if the size of the access vector is a
667   // multiple of the size of the alloca's vector element type.
668   //
669   // Examples:
670   //    - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
671   //    - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
672   //    - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
673   //        - 3*32 is not a multiple of 64
674   //
675   // We could handle more complicated cases, but it'd make things a lot more
676   // complicated.
677   if (isa<FixedVectorType>(AccessTy)) {
678     TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
679     TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
680     return AccTS.isKnownMultipleOf(VecTS);
681   }
682 
683   return CastInst::isBitOrNoopPointerCastable(VecTy->getElementType(), AccessTy,
684                                               DL);
685 }
686 
687 /// Iterates over an instruction worklist that may contain multiple instructions
688 /// from the same basic block, possibly listed out of program order.
689 template <typename InstContainer>
690 static void forEachWorkListItem(const InstContainer &WorkList,
691                                 std::function<void(Instruction *)> Fn) {
692   // Bucket up uses of the alloca by the block they occur in.
693   // This is important because we have to handle multiple defs/uses in a block
694   // ourselves: SSAUpdater is purely for cross-block references.
695   DenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
696   for (Instruction *User : WorkList)
697     UsesByBlock[User->getParent()].insert(User);
698 
699   for (Instruction *User : WorkList) {
700     BasicBlock *BB = User->getParent();
701     auto &BlockUses = UsesByBlock[BB];
702 
703     // Already processed, skip.
704     if (BlockUses.empty())
705       continue;
706 
707     // Only user in the block, directly process it.
708     if (BlockUses.size() == 1) {
709       Fn(User);
710       continue;
711     }
712 
713     // Multiple users in the block: do a linear scan to visit them in program order.
714     for (Instruction &Inst : *BB) {
715       if (!BlockUses.contains(&Inst))
716         continue;
717 
718       Fn(&Inst);
719     }
720 
721     // Clear the block so we know it's been processed.
722     BlockUses.clear();
723   }
724 }
725 
726 // FIXME: Should try to pick the most likely to be profitable allocas first.
727 bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
728   LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
729 
730   if (DisablePromoteAllocaToVector) {
731     LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
732     return false;
733   }
734 
735   Type *AllocaTy = Alloca.getAllocatedType();
736   auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
737   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
738     if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
739         ArrayTy->getNumElements() > 0)
740       VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
741                                       ArrayTy->getNumElements());
742   }
743 
744   // FIXME: There is no reason why we can't support larger arrays, we
745   // are just being conservative for now.
746   // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
747   // equivalent. Potentially these could also be promoted, but we don't
748   // currently handle this case.
749   if (!VectorTy) {
750     LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
751     return false;
752   }
753 
754   if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
755     LLVM_DEBUG(dbgs() << "  " << *VectorTy
756                       << " has an unsupported number of elements\n");
757     return false;
758   }
759 
760   std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
761   SmallVector<Instruction *> WorkList;
762   SmallVector<Instruction *> UsersToRemove;
763   SmallVector<Instruction *> DeferredInsts;
764   DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;
765 
766   const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
767     LLVM_DEBUG(dbgs() << "  Cannot promote alloca to vector: " << Msg << "\n"
768                       << "    " << *Inst << "\n");
769     return false;
770   };
771 
772   SmallVector<Use *, 8> Uses;
773   collectAllocaUses(Alloca, Uses);
774 
775   LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");
776 
777   Type *VecEltTy = VectorTy->getElementType();
778   unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
779   for (auto *U : Uses) {
780     Instruction *Inst = cast<Instruction>(U->getUser());
781 
782     if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
783       // This is a store of the pointer, not to the pointer.
784       if (isa<StoreInst>(Inst) &&
785           U->getOperandNo() != StoreInst::getPointerOperandIndex())
786         return RejectUser(Inst, "pointer is being stored");
787 
788       Type *AccessTy = getLoadStoreType(Inst);
789       if (AccessTy->isAggregateType())
790         return RejectUser(Inst, "unsupported load/store as aggregate");
791       assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());
792 
793       // Check that this is a simple access of a vector element.
794       bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
795                                           : cast<StoreInst>(Inst)->isSimple();
796       if (!IsSimple)
797         return RejectUser(Inst, "not a simple load or store");
798 
799       Ptr = Ptr->stripPointerCasts();
800 
801       // Alloca already accessed as vector.
802       if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
803                                 DL->getTypeStoreSize(AccessTy)) {
804         WorkList.push_back(Inst);
805         continue;
806       }
807 
808       if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
809         return RejectUser(Inst, "not a supported access type");
810 
811       WorkList.push_back(Inst);
812       continue;
813     }
814 
815     if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
816       // If we can't compute a vector index from this GEP, then we can't
817       // promote this alloca to vector.
818       Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
819       if (!Index)
820         return RejectUser(Inst, "cannot compute vector index for GEP");
821 
822       GEPVectorIdx[GEP] = Index;
823       UsersToRemove.push_back(Inst);
824       continue;
825     }
826 
827     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
828         MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
829       WorkList.push_back(Inst);
830       continue;
831     }
832 
833     if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
834       if (TransferInst->isVolatile())
835         return RejectUser(Inst, "mem transfer inst is volatile");
836 
837       ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
838       if (!Len || (Len->getZExtValue() % ElementSize))
839         return RejectUser(Inst, "mem transfer inst length is non-constant or "
840                                 "not a multiple of the vector element size");
841 
842       if (!TransferInfo.count(TransferInst)) {
843         DeferredInsts.push_back(Inst);
844         WorkList.push_back(Inst);
845         TransferInfo[TransferInst] = MemTransferInfo();
846       }
847 
848       auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
849         GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
850         if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
851           return nullptr;
852 
853         return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
854       };
855 
856       unsigned OpNum = U->getOperandNo();
857       MemTransferInfo *TI = &TransferInfo[TransferInst];
858       if (OpNum == 0) {
859         Value *Dest = TransferInst->getDest();
860         ConstantInt *Index = getPointerIndexOfAlloca(Dest);
861         if (!Index)
862           return RejectUser(Inst, "could not calculate constant dest index");
863         TI->DestIndex = Index;
864       } else {
865         assert(OpNum == 1);
866         Value *Src = TransferInst->getSource();
867         ConstantInt *Index = getPointerIndexOfAlloca(Src);
868         if (!Index)
869           return RejectUser(Inst, "could not calculate constant src index");
870         TI->SrcIndex = Index;
871       }
872       continue;
873     }
874 
875     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
876       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
877         WorkList.push_back(Inst);
878         continue;
879       }
880     }
881 
882     // Ignore assume-like intrinsics and comparisons used in assumes.
883     if (isAssumeLikeIntrinsic(Inst)) {
884       if (!Inst->use_empty())
885         return RejectUser(Inst, "assume-like intrinsic cannot have any users");
886       UsersToRemove.push_back(Inst);
887       continue;
888     }
889 
890     if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
891           return isAssumeLikeIntrinsic(cast<Instruction>(U));
892         })) {
893       UsersToRemove.push_back(Inst);
894       continue;
895     }
896 
897     return RejectUser(Inst, "unhandled alloca user");
898   }
899 
900   while (!DeferredInsts.empty()) {
901     Instruction *Inst = DeferredInsts.pop_back_val();
902     MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
903     // TODO: Support the case if the pointers are from different alloca or
904     // from different address spaces.
905     MemTransferInfo &Info = TransferInfo[TransferInst];
906     if (!Info.SrcIndex || !Info.DestIndex)
907       return RejectUser(
908           Inst, "mem transfer inst is missing constant src and/or dst index");
909   }
910 
911   LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
912                     << *VectorTy << '\n');
913   const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);
914 
915   // Alloca is uninitialized memory. Imitate that by making the first value
916   // undef.
917   SSAUpdater Updater;
918   Updater.Initialize(VectorTy, "promotealloca");
919   Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));
920 
921   // First handle the initial worklist.
922   SmallVector<LoadInst *, 4> DeferredLoads;
923   forEachWorkListItem(WorkList, [&](Instruction *I) {
924     BasicBlock *BB = I->getParent();
925     // On the first pass, we only take values that are trivially known, i.e.
926     // where AddAvailableValue was already called in this block.
927     Value *Result = promoteAllocaUserToVector(
928         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
929         Updater.FindValueForBlock(BB), DeferredLoads);
930     if (Result)
931       Updater.AddAvailableValue(BB, Result);
932   });
933 
934   // Then handle deferred loads.
935   forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
936     SmallVector<LoadInst *, 0> NewDLs;
937     BasicBlock *BB = I->getParent();
938     // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
939     // get a value, inserting PHIs as needed.
940     Value *Result = promoteAllocaUserToVector(
941         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
942         Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
943     if (Result)
944       Updater.AddAvailableValue(BB, Result);
945     assert(NewDLs.empty() && "No more deferred loads should be queued!");
946   });
947 
948   // Delete all instructions. On the first pass, new dummy loads may have been
949   // added so we need to collect them too.
950   DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
951   InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
952   for (Instruction *I : InstsToDelete) {
953     assert(I->use_empty());
954     I->eraseFromParent();
955   }
956 
957   // Delete all the users that are known to be removable.
958   for (Instruction *I : reverse(UsersToRemove)) {
959     I->dropDroppableUses();
960     assert(I->use_empty());
961     I->eraseFromParent();
962   }
963 
964   // Alloca should now be dead too.
965   assert(Alloca.use_empty());
966   Alloca.eraseFromParent();
967   return true;
968 }
969 
970 std::pair<Value *, Value *>
971 AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
972   Function &F = *Builder.GetInsertBlock()->getParent();
973   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
974 
975   if (!IsAMDHSA) {
976     CallInst *LocalSizeY =
977         Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_y, {}, {});
978     CallInst *LocalSizeZ =
979         Builder.CreateIntrinsic(Intrinsic::r600_read_local_size_z, {}, {});
980 
981     ST.makeLIDRangeMetadata(LocalSizeY);
982     ST.makeLIDRangeMetadata(LocalSizeZ);
983 
984     return std::pair(LocalSizeY, LocalSizeZ);
985   }
986 
987   // We must read the size out of the dispatch pointer.
988   assert(IsAMDGCN);
989 
990   // We are indexing into this struct, and want to extract the workgroup_size_*
991   // fields.
992   //
993   //   typedef struct hsa_kernel_dispatch_packet_s {
994   //     uint16_t header;
995   //     uint16_t setup;
996   //     uint16_t workgroup_size_x ;
997   //     uint16_t workgroup_size_y;
998   //     uint16_t workgroup_size_z;
999   //     uint16_t reserved0;
1000   //     uint32_t grid_size_x ;
1001   //     uint32_t grid_size_y ;
1002   //     uint32_t grid_size_z;
1003   //
1004   //     uint32_t private_segment_size;
1005   //     uint32_t group_segment_size;
1006   //     uint64_t kernel_object;
1007   //
1008   // #ifdef HSA_LARGE_MODEL
1009   //     void *kernarg_address;
1010   // #elif defined HSA_LITTLE_ENDIAN
1011   //     void *kernarg_address;
1012   //     uint32_t reserved1;
1013   // #else
1014   //     uint32_t reserved1;
1015   //     void *kernarg_address;
1016   // #endif
1017   //     uint64_t reserved2;
1018   //     hsa_signal_t completion_signal; // uint64_t wrapper
1019   //   } hsa_kernel_dispatch_packet_t
1020   //
1021   CallInst *DispatchPtr =
1022       Builder.CreateIntrinsic(Intrinsic::amdgcn_dispatch_ptr, {}, {});
1023   DispatchPtr->addRetAttr(Attribute::NoAlias);
1024   DispatchPtr->addRetAttr(Attribute::NonNull);
1025   F.removeFnAttr("amdgpu-no-dispatch-ptr");
1026 
1027   // Size of the dispatch packet struct.
1028   DispatchPtr->addDereferenceableRetAttr(64);
1029 
1030   Type *I32Ty = Type::getInt32Ty(Mod->getContext());
1031 
1032   // We could do a single 64-bit load here, but it's likely that the basic
1033   // 32-bit load and extract sequence is already present, and it is probably easier
1034   // to CSE this. The loads should be mergeable later anyway.
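  // Index 1 into an i32 view of the packet is byte offset 4 (workgroup_size_x
  // in the low half, workgroup_size_y in the high half); index 2 is byte
  // offset 8 (workgroup_size_z in the low half, reserved0 in the high half).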
1035   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, DispatchPtr, 1);
1036   LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
1037 
1038   Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, DispatchPtr, 2);
1039   LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
1040 
1041   MDNode *MD = MDNode::get(Mod->getContext(), {});
1042   LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
1043   LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
1044   ST.makeLIDRangeMetadata(LoadZU);
1045 
1046   // Extract y component. Upper half of LoadZU should be zero already.
1047   Value *Y = Builder.CreateLShr(LoadXY, 16);
1048 
1049   return std::pair(Y, LoadZU);
1050 }
1051 
1052 Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
1053                                               unsigned N) {
1054   Function *F = Builder.GetInsertBlock()->getParent();
1055   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
1056   Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
1057   StringRef AttrName;
1058 
1059   switch (N) {
1060   case 0:
1061     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
1062                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
1063     AttrName = "amdgpu-no-workitem-id-x";
1064     break;
1065   case 1:
1066     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
1067                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
1068     AttrName = "amdgpu-no-workitem-id-y";
1069     break;
1070 
1071   case 2:
1072     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
1073                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
1074     AttrName = "amdgpu-no-workitem-id-z";
1075     break;
1076   default:
1077     llvm_unreachable("invalid dimension");
1078   }
1079 
1080   Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(Mod, IntrID);
1081   CallInst *CI = Builder.CreateCall(WorkitemIdFn);
1082   ST.makeLIDRangeMetadata(CI);
1083   F->removeFnAttr(AttrName);
1084 
1085   return CI;
1086 }
1087 
1088 static bool isCallPromotable(CallInst *CI) {
1089   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1090   if (!II)
1091     return false;
1092 
1093   switch (II->getIntrinsicID()) {
1094   case Intrinsic::memcpy:
1095   case Intrinsic::memmove:
1096   case Intrinsic::memset:
1097   case Intrinsic::lifetime_start:
1098   case Intrinsic::lifetime_end:
1099   case Intrinsic::invariant_start:
1100   case Intrinsic::invariant_end:
1101   case Intrinsic::launder_invariant_group:
1102   case Intrinsic::strip_invariant_group:
1103   case Intrinsic::objectsize:
1104     return true;
1105   default:
1106     return false;
1107   }
1108 }
1109 
1110 bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
1111     Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
1112     int OpIdx1) const {
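  // For example, given "icmp eq ptr %p, %q" where Val is %p (derived from
  // BaseAlloca), OtherOp is %q; the use is only safe if %q is a null/zero
  // constant or is itself derived from the same alloca (illustrative names).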
1113   // Figure out which operand is the one we might not be promoting.
1114   Value *OtherOp = Inst->getOperand(OpIdx0);
1115   if (Val == OtherOp)
1116     OtherOp = Inst->getOperand(OpIdx1);
1117 
1118   if (isa<ConstantPointerNull, ConstantAggregateZero>(OtherOp))
1119     return true;
1120 
1121   // TODO: getUnderlyingObject will not work on a vector getelementptr
1122   Value *OtherObj = getUnderlyingObject(OtherOp);
1123   if (!isa<AllocaInst>(OtherObj))
1124     return false;
1125 
1126   // TODO: We should be able to replace undefs with the right pointer type.
1127 
1128   // TODO: If we know the other base object is another promotable
1129   // alloca, not necessarily this alloca, we can do this. The
1130   // important part is both must have the same address space at
1131   // the end.
1132   if (OtherObj != BaseAlloca) {
1133     LLVM_DEBUG(
1134         dbgs() << "Found a binary instruction with another alloca object\n");
1135     return false;
1136   }
1137 
1138   return true;
1139 }
1140 
1141 bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
1142     Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
1143 
1144   for (User *User : Val->users()) {
1145     if (is_contained(WorkList, User))
1146       continue;
1147 
1148     if (CallInst *CI = dyn_cast<CallInst>(User)) {
1149       if (!isCallPromotable(CI))
1150         return false;
1151 
1152       WorkList.push_back(User);
1153       continue;
1154     }
1155 
1156     Instruction *UseInst = cast<Instruction>(User);
1157     if (UseInst->getOpcode() == Instruction::PtrToInt)
1158       return false;
1159 
1160     if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
1161       if (LI->isVolatile())
1162         return false;
1163       continue;
1164     }
1165 
1166     if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
1167       if (SI->isVolatile())
1168         return false;
1169 
1170       // Reject if the stored value is not the pointer operand.
1171       if (SI->getPointerOperand() != Val)
1172         return false;
1173       continue;
1174     }
1175 
1176     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
1177       if (RMW->isVolatile())
1178         return false;
1179       continue;
1180     }
1181 
1182     if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
1183       if (CAS->isVolatile())
1184         return false;
1185       continue;
1186     }
1187 
1188     // Only promote an icmp if we know that the other operand is derived from
1189     // a pointer that will also be promoted.
1190     if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
1191       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
1192         return false;
1193 
1194       // May need to rewrite constant operands.
1195       WorkList.push_back(ICmp);
1196       continue;
1197     }
1198 
1199     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
1200       // Be conservative if an address could be computed outside the bounds of
1201       // the alloca.
1202       if (!GEP->isInBounds())
1203         return false;
1204     } else if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
1205       // Only promote a select if we know that the other select operand is from
1206       // another pointer that will also be promoted.
1207       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
1208         return false;
1209     } else if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
1210       // Repeat for phis.
1211 
1212       // TODO: Handle more complex cases. We should be able to replace loops
1213       // over arrays.
1214       switch (Phi->getNumIncomingValues()) {
1215       case 1:
1216         break;
1217       case 2:
1218         if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
1219           return false;
1220         break;
1221       default:
1222         return false;
1223       }
1224     } else if (!isa<ExtractElementInst>(User)) {
1225       // Do not promote vector/aggregate type instructions. It is hard to track
1226       // their users.
1227 
1228       // Do not promote addrspacecast.
1229       //
1230       // TODO: If we know the address is only observed through flat pointers, we
1231       // could still promote.
1232       return false;
1233     }
1234 
1235     WorkList.push_back(User);
1236     if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
1237       return false;
1238   }
1239 
1240   return true;
1241 }
1242 
1243 bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
1244 
1245   FunctionType *FTy = F.getFunctionType();
1246   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1247 
1248   // If the function has any arguments in the local address space, then it's
1249   // possible these arguments require the entire local memory space, so
1250   // we cannot use local memory in the pass.
1251   for (Type *ParamTy : FTy->params()) {
1252     PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
1253     if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1254       LocalMemLimit = 0;
1255       LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
1256                            "local memory disabled.\n");
1257       return false;
1258     }
1259   }
1260 
1261   LocalMemLimit = ST.getAddressableLocalMemorySize();
1262   if (LocalMemLimit == 0)
1263     return false;
1264 
1265   SmallVector<const Constant *, 16> Stack;
1266   SmallPtrSet<const Constant *, 8> VisitedConstants;
1267   SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
1268 
1269   auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
1270     for (const User *U : Val->users()) {
1271       if (const Instruction *Use = dyn_cast<Instruction>(U)) {
1272         if (Use->getParent()->getParent() == &F)
1273           return true;
1274       } else {
1275         const Constant *C = cast<Constant>(U);
1276         if (VisitedConstants.insert(C).second)
1277           Stack.push_back(C);
1278       }
1279     }
1280 
1281     return false;
1282   };
1283 
1284   for (GlobalVariable &GV : Mod->globals()) {
1285     if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
1286       continue;
1287 
1288     if (visitUsers(&GV, &GV)) {
1289       UsedLDS.insert(&GV);
1290       Stack.clear();
1291       continue;
1292     }
1293 
1294     // For any ConstantExpr uses, we need to recursively search the users until
1295     // we see a function.
1296     while (!Stack.empty()) {
1297       const Constant *C = Stack.pop_back_val();
1298       if (visitUsers(&GV, C)) {
1299         UsedLDS.insert(&GV);
1300         Stack.clear();
1301         break;
1302       }
1303     }
1304   }
1305 
1306   const DataLayout &DL = Mod->getDataLayout();
1307   SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
1308   AllocatedSizes.reserve(UsedLDS.size());
1309 
1310   for (const GlobalVariable *GV : UsedLDS) {
1311     Align Alignment =
1312         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
1313     uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
1314 
1315     // HIP uses an extern unsized array in local address space for dynamically
1316     // allocated shared memory.  In that case, we have to disable the promotion.
1317     if (GV->hasExternalLinkage() && AllocSize == 0) {
1318       LocalMemLimit = 0;
1319       LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
1320                            "local memory. Promoting to local memory "
1321                            "disabled.\n");
1322       return false;
1323     }
1324 
1325     AllocatedSizes.emplace_back(AllocSize, Alignment);
1326   }
1327 
1328   // Sort to try to estimate the worst case alignment padding
1329   //
1330   // FIXME: We should really do something to fix the addresses to a more optimal
1331   // value instead
1332   llvm::sort(AllocatedSizes, llvm::less_second());
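  // E.g. for globals of (size, align) = (4, 4) and (1, 16), the ascending
  // alignment order gives 4 + pad to 16 + 1 = 17 bytes rather than 8, which is
  // the worst case being estimated here (illustrative numbers).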
1333 
1334   // Check how much local memory is being used by global objects
1335   CurrentLocalMemUsage = 0;
1336 
1337   // FIXME: Try to account for padding here. The real padding and address is
1338   // currently determined from the inverse order of uses in the function when
1339   // legalizing, which could also potentially change. We try to estimate the
1340   // worst case here, but we probably should fix the addresses earlier.
1341   for (auto Alloc : AllocatedSizes) {
1342     CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
1343     CurrentLocalMemUsage += Alloc.first;
1344   }
1345 
1346   unsigned MaxOccupancy =
1347       ST.getOccupancyWithWorkGroupSizes(CurrentLocalMemUsage, F).second;
1348 
1349   // Restrict local memory usage so that we don't drastically reduce occupancy,
1350   // unless it is already significantly reduced.
1351 
1352   // TODO: Have some sort of hint or other heuristics to guess occupancy based
1353   // on other factors.
1354   unsigned OccupancyHint = ST.getWavesPerEU(F).second;
1355   if (OccupancyHint == 0)
1356     OccupancyHint = 7;
1357 
1358   // Clamp to max value.
1359   OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
1360 
1361   // Check the hint but ignore it if it's obviously wrong from the existing LDS
1362   // usage.
1363   MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
1364 
1365   // Round up to the next tier of usage.
1366   unsigned MaxSizeWithWaveCount =
1367       ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
1368 
1369   // Program is possibly broken by using more local mem than available.
1370   if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
1371     return false;
1372 
1373   LocalMemLimit = MaxSizeWithWaveCount;
1374 
1375   LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
1376                     << " bytes of LDS\n"
1377                     << "  Rounding size to " << MaxSizeWithWaveCount
1378                     << " with a maximum occupancy of " << MaxOccupancy << '\n'
1379                     << " and " << (LocalMemLimit - CurrentLocalMemUsage)
1380                     << " available for promotion\n");
1381 
1382   return true;
1383 }
1384 
1385 // FIXME: Should try to pick the most likely to be profitable allocas first.
1386 bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
1387                                                     bool SufficientLDS) {
1388   LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');
1389 
1390   if (DisablePromoteAllocaToLDS) {
1391     LLVM_DEBUG(dbgs() << "  Promote alloca to LDS is disabled\n");
1392     return false;
1393   }
1394 
1395   const DataLayout &DL = Mod->getDataLayout();
1396   IRBuilder<> Builder(&I);
1397 
1398   const Function &ContainingFunction = *I.getParent()->getParent();
1399   CallingConv::ID CC = ContainingFunction.getCallingConv();
1400 
1401   // Don't promote the alloca to LDS for shader calling conventions as the work
1402   // item ID intrinsics are not supported for these calling conventions.
1403   // Furthermore, not all LDS is available for some of the stages.
1404   switch (CC) {
1405   case CallingConv::AMDGPU_KERNEL:
1406   case CallingConv::SPIR_KERNEL:
1407     break;
1408   default:
1409     LLVM_DEBUG(
1410         dbgs() << "  Promote alloca to LDS is not supported with this "
1411                   "calling convention\n");
1412     return false;
1413   }
1414 
1415   // Not likely to have sufficient local memory for promotion.
1416   if (!SufficientLDS)
1417     return false;
1418 
1419   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
1420   unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
1421 
1422   Align Alignment =
1423       DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
1424 
1425   // FIXME: This computed padding is likely wrong since it depends on inverse
1426   // usage order.
1427   //
1428   // FIXME: If we're allowed to use all of the memory, it is also possible we
1429   // could end up using more than the maximum due to alignment padding.
1430 
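       // Every workitem gets its own copy of the alloca in LDS, so the added
       // usage scales with the maximum flat workgroup size.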
1431   uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
1432   uint32_t AllocSize =
1433       WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
1434   NewSize += AllocSize;
1435 
1436   if (NewSize > LocalMemLimit) {
1437     LLVM_DEBUG(dbgs() << "  " << AllocSize
1438                       << " bytes of local memory not available to promote\n");
1439     return false;
1440   }
1441 
1442   CurrentLocalMemUsage = NewSize;
1443 
1444   std::vector<Value *> WorkList;
1445 
1446   if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
1447     LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
1448     return false;
1449   }
1450 
1451   LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
1452 
1453   Function *F = I.getParent()->getParent();
1454 
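       // Create one LDS array element per workitem in the (maximum) workgroup;
       // each thread will address only its own element.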
1455   Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
1456   GlobalVariable *GV = new GlobalVariable(
1457       *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
1458       Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
1459       GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
1460   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1461   GV->setAlignment(I.getAlign());
1462 
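       // Compute the flat workitem ID within the workgroup:
       //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
       // where TCntY/TCntZ are the workgroup sizes in Y and Z. TID indexes
       // this thread's element of the LDS array.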
1463   Value *TCntY, *TCntZ;
1464 
1465   std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
1466   Value *TIdX = getWorkitemID(Builder, 0);
1467   Value *TIdY = getWorkitemID(Builder, 1);
1468   Value *TIdZ = getWorkitemID(Builder, 2);
1469 
1470   Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
1471   Tmp0 = Builder.CreateMul(Tmp0, TIdX);
1472   Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
1473   Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
1474   TID = Builder.CreateAdd(TID, TIdZ);
1475 
1476   LLVMContext &Context = Mod->getContext();
1477   Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};
1478 
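       // Replace the alloca itself with a GEP to this workitem's element of
       // the LDS array.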
1479   Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
1480   I.mutateType(Offset->getType());
1481   I.replaceAllUsesWith(Offset);
1482   I.eraseFromParent();
1483 
1484   SmallVector<IntrinsicInst *> DeferredIntrs;
1485 
1486   PointerType *NewPtrTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
1487 
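       // Rewrite the recorded users: pointer-typed values generally just have
       // their type mutated to the LDS address space, while intrinsic calls
       // are re-created (or deferred) below.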
1488   for (Value *V : WorkList) {
1489     CallInst *Call = dyn_cast<CallInst>(V);
1490     if (!Call) {
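           // For pointer compares, only constant null/zero operands need to be
           // retyped here; the other operand is rewritten when it is visited
           // itself.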
1491       if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
1492         Value *LHS = CI->getOperand(0);
1493         Value *RHS = CI->getOperand(1);
1494 
1495         Type *NewTy = LHS->getType()->getWithNewType(NewPtrTy);
1496         if (isa<ConstantPointerNull, ConstantAggregateZero>(LHS))
1497           CI->setOperand(0, Constant::getNullValue(NewTy));
1498 
1499         if (isa<ConstantPointerNull, ConstantAggregateZero>(RHS))
1500           CI->setOperand(1, Constant::getNullValue(NewTy));
1501 
1502         continue;
1503       }
1504 
1505       // The operand's value will be corrected on its own; we don't want to
1506       // touch the cast's users.
1507       if (isa<AddrSpaceCastInst>(V))
1508         continue;
1509 
1510       assert(V->getType()->isPtrOrPtrVectorTy());
1511 
1512       Type *NewTy = V->getType()->getWithNewType(NewPtrTy);
1513       V->mutateType(NewTy);
1514 
1515       // Adjust the types of any constant operands.
1516       if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1517         if (isa<ConstantPointerNull, ConstantAggregateZero>(SI->getOperand(1)))
1518           SI->setOperand(1, Constant::getNullValue(NewTy));
1519 
1520         if (isa<ConstantPointerNull, ConstantAggregateZero>(SI->getOperand(2)))
1521           SI->setOperand(2, Constant::getNullValue(NewTy));
1522       } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1523         for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1524           if (isa<ConstantPointerNull, ConstantAggregateZero>(
1525                   Phi->getIncomingValue(I)))
1526             Phi->setIncomingValue(I, Constant::getNullValue(NewTy));
1527         }
1528       }
1529 
1530       continue;
1531     }
1532 
1533     IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1534     Builder.SetInsertPoint(Intr);
1535     switch (Intr->getIntrinsicID()) {
1536     case Intrinsic::lifetime_start:
1537     case Intrinsic::lifetime_end:
1538       // These intrinsics are for address space 0 only.
1539       Intr->eraseFromParent();
1540       continue;
1541     case Intrinsic::memcpy:
1542     case Intrinsic::memmove:
1543       // These have two pointer operands. In case the second pointer also
1544       // needs to be replaced, we defer processing of these intrinsics until
1545       // all other values are processed.
1546       DeferredIntrs.push_back(Intr);
1547       continue;
1548     case Intrinsic::memset: {
1549       MemSetInst *MemSet = cast<MemSetInst>(Intr);
1550       Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
1551                            MemSet->getLength(), MemSet->getDestAlign(),
1552                            MemSet->isVolatile());
1553       Intr->eraseFromParent();
1554       continue;
1555     }
1556     case Intrinsic::invariant_start:
1557     case Intrinsic::invariant_end:
1558     case Intrinsic::launder_invariant_group:
1559     case Intrinsic::strip_invariant_group: {
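           // Re-create these intrinsics against the promoted pointer so they
           // are mangled for the LDS address space; invariant.start/end keep
           // their original leading arguments.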
1560       SmallVector<Value *> Args;
1561       if (Intr->getIntrinsicID() == Intrinsic::invariant_start) {
1562         Args.emplace_back(Intr->getArgOperand(0));
1563       } else if (Intr->getIntrinsicID() == Intrinsic::invariant_end) {
1564         Args.emplace_back(Intr->getArgOperand(0));
1565         Args.emplace_back(Intr->getArgOperand(1));
1566       }
1567       Args.emplace_back(Offset);
1568       Function *F = Intrinsic::getOrInsertDeclaration(
1569           Intr->getModule(), Intr->getIntrinsicID(), Offset->getType());
1570       CallInst *NewIntr =
1571           CallInst::Create(F, Args, Intr->getName(), Intr->getIterator());
1572       Intr->mutateType(NewIntr->getType());
1573       Intr->replaceAllUsesWith(NewIntr);
1574       Intr->eraseFromParent();
1575       continue;
1576     }
1577     case Intrinsic::objectsize: {
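           // llvm.objectsize is overloaded on the pointer type, so re-create
           // it with the LDS address space pointer.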
1578       Value *Src = Intr->getOperand(0);
1579 
1580       CallInst *NewCall = Builder.CreateIntrinsic(
1581           Intrinsic::objectsize,
1582           {Intr->getType(), PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)},
1583           {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1584       Intr->replaceAllUsesWith(NewCall);
1585       Intr->eraseFromParent();
1586       continue;
1587     }
1588     default:
1589       Intr->print(errs());
1590       llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1591     }
1592   }
1593 
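       // All pointer values in the worklist have been rewritten by now, so the
       // deferred memcpy/memmove calls can be re-emitted with both pointer
       // operands (and the intrinsic mangling) reflecting their new types.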
1594   for (IntrinsicInst *Intr : DeferredIntrs) {
1595     Builder.SetInsertPoint(Intr);
1596     Intrinsic::ID ID = Intr->getIntrinsicID();
1597     assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1598 
1599     MemTransferInst *MI = cast<MemTransferInst>(Intr);
1600     auto *B = Builder.CreateMemTransferInst(
1601         ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
1602         MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
1603 
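         // Carry over the dereferenceable bytes of both pointer arguments to
         // the new call.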
1604     for (unsigned I = 0; I != 2; ++I) {
1605       if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1606         B->addDereferenceableParamAttr(I, Bytes);
1607       }
1608     }
1609 
1610     Intr->eraseFromParent();
1611   }
1612 
1613   return true;
1614 }
1615