xref: /freebsd-src/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
10b57cec5SDimitry Andric //===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
906c3fb27SDimitry Andric // Eliminates allocas by either converting them into vectors or by migrating
1006c3fb27SDimitry Andric // them to local address space.
1106c3fb27SDimitry Andric //
1206c3fb27SDimitry Andric // Two passes are exposed by this file:
1306c3fb27SDimitry Andric //    - "promote-alloca-to-vector", which runs early in the pipeline and only
1406c3fb27SDimitry Andric //      promotes to vector. Promotion to vector is almost always profitable
1506c3fb27SDimitry Andric //      except when the alloca is too big and the promotion would result in
1606c3fb27SDimitry Andric //      very high register pressure.
1706c3fb27SDimitry Andric //    - "promote-alloca", which does both promotion to vector and LDS and runs
1806c3fb27SDimitry Andric //      much later in the pipeline. This runs after SROA because promoting to
1906c3fb27SDimitry Andric //      LDS is of course less profitable than getting rid of the alloca or
2006c3fb27SDimitry Andric //      vectorizing it, thus we only want to do it when the only alternative is
2106c3fb27SDimitry Andric //      lowering the alloca to stack.
2206c3fb27SDimitry Andric //
2306c3fb27SDimitry Andric // Note that both of them exist for the old and new PMs. The new PM passes are
2406c3fb27SDimitry Andric // declared in AMDGPU.h and the legacy PM ones are declared here.s
250b57cec5SDimitry Andric //
260b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
270b57cec5SDimitry Andric 
280b57cec5SDimitry Andric #include "AMDGPU.h"
29e8d8bef9SDimitry Andric #include "GCNSubtarget.h"
301fd87a68SDimitry Andric #include "Utils/AMDGPUBaseInfo.h"
3106c3fb27SDimitry Andric #include "llvm/ADT/STLExtras.h"
320b57cec5SDimitry Andric #include "llvm/Analysis/CaptureTracking.h"
3306c3fb27SDimitry Andric #include "llvm/Analysis/InstSimplifyFolder.h"
3406c3fb27SDimitry Andric #include "llvm/Analysis/InstructionSimplify.h"
35*0fca6ea1SDimitry Andric #include "llvm/Analysis/LoopInfo.h"
360b57cec5SDimitry Andric #include "llvm/Analysis/ValueTracking.h"
370b57cec5SDimitry Andric #include "llvm/CodeGen/TargetPassConfig.h"
380b57cec5SDimitry Andric #include "llvm/IR/IRBuilder.h"
391fd87a68SDimitry Andric #include "llvm/IR/IntrinsicInst.h"
40480093f4SDimitry Andric #include "llvm/IR/IntrinsicsAMDGPU.h"
41480093f4SDimitry Andric #include "llvm/IR/IntrinsicsR600.h"
4206c3fb27SDimitry Andric #include "llvm/IR/PatternMatch.h"
43*0fca6ea1SDimitry Andric #include "llvm/InitializePasses.h"
440b57cec5SDimitry Andric #include "llvm/Pass.h"
450b57cec5SDimitry Andric #include "llvm/Target/TargetMachine.h"
4606c3fb27SDimitry Andric #include "llvm/Transforms/Utils/SSAUpdater.h"
470b57cec5SDimitry Andric 
480b57cec5SDimitry Andric #define DEBUG_TYPE "amdgpu-promote-alloca"
490b57cec5SDimitry Andric 
500b57cec5SDimitry Andric using namespace llvm;
510b57cec5SDimitry Andric 
520b57cec5SDimitry Andric namespace {
530b57cec5SDimitry Andric 
5406c3fb27SDimitry Andric static cl::opt<bool>
5506c3fb27SDimitry Andric     DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
560b57cec5SDimitry Andric                                  cl::desc("Disable promote alloca to vector"),
570b57cec5SDimitry Andric                                  cl::init(false));
580b57cec5SDimitry Andric 
5906c3fb27SDimitry Andric static cl::opt<bool>
6006c3fb27SDimitry Andric     DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
610b57cec5SDimitry Andric                               cl::desc("Disable promote alloca to LDS"),
620b57cec5SDimitry Andric                               cl::init(false));
630b57cec5SDimitry Andric 
645ffd83dbSDimitry Andric static cl::opt<unsigned> PromoteAllocaToVectorLimit(
655ffd83dbSDimitry Andric     "amdgpu-promote-alloca-to-vector-limit",
665ffd83dbSDimitry Andric     cl::desc("Maximum byte size to consider promote alloca to vector"),
675ffd83dbSDimitry Andric     cl::init(0));
685ffd83dbSDimitry Andric 
69*0fca6ea1SDimitry Andric static cl::opt<unsigned>
70*0fca6ea1SDimitry Andric     LoopUserWeight("promote-alloca-vector-loop-user-weight",
71*0fca6ea1SDimitry Andric                    cl::desc("The bonus weight of users of allocas within loop "
72*0fca6ea1SDimitry Andric                             "when sorting profitable allocas"),
73*0fca6ea1SDimitry Andric                    cl::init(4));
74*0fca6ea1SDimitry Andric 
7506c3fb27SDimitry Andric // Shared implementation which can do both promotion to vector and to LDS.
76e8d8bef9SDimitry Andric class AMDGPUPromoteAllocaImpl {
770b57cec5SDimitry Andric private:
78e8d8bef9SDimitry Andric   const TargetMachine &TM;
79*0fca6ea1SDimitry Andric   LoopInfo &LI;
800b57cec5SDimitry Andric   Module *Mod = nullptr;
810b57cec5SDimitry Andric   const DataLayout *DL = nullptr;
820b57cec5SDimitry Andric 
830b57cec5SDimitry Andric   // FIXME: This should be per-kernel.
840b57cec5SDimitry Andric   uint32_t LocalMemLimit = 0;
850b57cec5SDimitry Andric   uint32_t CurrentLocalMemUsage = 0;
865ffd83dbSDimitry Andric   unsigned MaxVGPRs;
870b57cec5SDimitry Andric 
880b57cec5SDimitry Andric   bool IsAMDGCN = false;
890b57cec5SDimitry Andric   bool IsAMDHSA = false;
900b57cec5SDimitry Andric 
910b57cec5SDimitry Andric   std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
920b57cec5SDimitry Andric   Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
930b57cec5SDimitry Andric 
940b57cec5SDimitry Andric   /// BaseAlloca is the alloca root the search started from.
950b57cec5SDimitry Andric   /// Val may be that alloca or a recursive user of it.
9606c3fb27SDimitry Andric   bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
970b57cec5SDimitry Andric                                std::vector<Value *> &WorkList) const;
980b57cec5SDimitry Andric 
990b57cec5SDimitry Andric   /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
1000b57cec5SDimitry Andric   /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
1010b57cec5SDimitry Andric   /// Returns true if both operands are derived from the same alloca. Val should
1020b57cec5SDimitry Andric   /// be the same value as one of the input operands of UseInst.
1030b57cec5SDimitry Andric   bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
10406c3fb27SDimitry Andric                                        Instruction *UseInst, int OpIdx0,
10506c3fb27SDimitry Andric                                        int OpIdx1) const;
1060b57cec5SDimitry Andric 
1070b57cec5SDimitry Andric   /// Check whether we have enough local memory for promotion.
1080b57cec5SDimitry Andric   bool hasSufficientLocalMem(const Function &F);
1090b57cec5SDimitry Andric 
11006c3fb27SDimitry Andric   bool tryPromoteAllocaToVector(AllocaInst &I);
11106c3fb27SDimitry Andric   bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
1120b57cec5SDimitry Andric 
113*0fca6ea1SDimitry Andric   void sortAllocasToPromote(SmallVectorImpl<AllocaInst *> &Allocas);
114*0fca6ea1SDimitry Andric 
115e8d8bef9SDimitry Andric public:
116*0fca6ea1SDimitry Andric   AMDGPUPromoteAllocaImpl(TargetMachine &TM, LoopInfo &LI) : TM(TM), LI(LI) {
117*0fca6ea1SDimitry Andric 
11806c3fb27SDimitry Andric     const Triple &TT = TM.getTargetTriple();
11906c3fb27SDimitry Andric     IsAMDGCN = TT.getArch() == Triple::amdgcn;
12006c3fb27SDimitry Andric     IsAMDHSA = TT.getOS() == Triple::AMDHSA;
12106c3fb27SDimitry Andric   }
12206c3fb27SDimitry Andric 
12306c3fb27SDimitry Andric   bool run(Function &F, bool PromoteToLDS);
12406c3fb27SDimitry Andric };
12506c3fb27SDimitry Andric 
12606c3fb27SDimitry Andric // FIXME: This can create globals so should be a module pass.
12706c3fb27SDimitry Andric class AMDGPUPromoteAlloca : public FunctionPass {
12806c3fb27SDimitry Andric public:
12906c3fb27SDimitry Andric   static char ID;
13006c3fb27SDimitry Andric 
13106c3fb27SDimitry Andric   AMDGPUPromoteAlloca() : FunctionPass(ID) {}
13206c3fb27SDimitry Andric 
13306c3fb27SDimitry Andric   bool runOnFunction(Function &F) override {
13406c3fb27SDimitry Andric     if (skipFunction(F))
13506c3fb27SDimitry Andric       return false;
13606c3fb27SDimitry Andric     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
137*0fca6ea1SDimitry Andric       return AMDGPUPromoteAllocaImpl(
138*0fca6ea1SDimitry Andric                  TPC->getTM<TargetMachine>(),
139*0fca6ea1SDimitry Andric                  getAnalysis<LoopInfoWrapperPass>().getLoopInfo())
14006c3fb27SDimitry Andric           .run(F, /*PromoteToLDS*/ true);
14106c3fb27SDimitry Andric     return false;
14206c3fb27SDimitry Andric   }
14306c3fb27SDimitry Andric 
14406c3fb27SDimitry Andric   StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
14506c3fb27SDimitry Andric 
14606c3fb27SDimitry Andric   void getAnalysisUsage(AnalysisUsage &AU) const override {
14706c3fb27SDimitry Andric     AU.setPreservesCFG();
148*0fca6ea1SDimitry Andric     AU.addRequired<LoopInfoWrapperPass>();
14906c3fb27SDimitry Andric     FunctionPass::getAnalysisUsage(AU);
15006c3fb27SDimitry Andric   }
1510b57cec5SDimitry Andric };
1520b57cec5SDimitry Andric 
1535ffd83dbSDimitry Andric class AMDGPUPromoteAllocaToVector : public FunctionPass {
1545ffd83dbSDimitry Andric public:
1555ffd83dbSDimitry Andric   static char ID;
1565ffd83dbSDimitry Andric 
1575ffd83dbSDimitry Andric   AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
1585ffd83dbSDimitry Andric 
15906c3fb27SDimitry Andric   bool runOnFunction(Function &F) override {
16006c3fb27SDimitry Andric     if (skipFunction(F))
16106c3fb27SDimitry Andric       return false;
16206c3fb27SDimitry Andric     if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
163*0fca6ea1SDimitry Andric       return AMDGPUPromoteAllocaImpl(
164*0fca6ea1SDimitry Andric                  TPC->getTM<TargetMachine>(),
165*0fca6ea1SDimitry Andric                  getAnalysis<LoopInfoWrapperPass>().getLoopInfo())
16606c3fb27SDimitry Andric           .run(F, /*PromoteToLDS*/ false);
16706c3fb27SDimitry Andric     return false;
16806c3fb27SDimitry Andric   }
1695ffd83dbSDimitry Andric 
1705ffd83dbSDimitry Andric   StringRef getPassName() const override {
1715ffd83dbSDimitry Andric     return "AMDGPU Promote Alloca to vector";
1725ffd83dbSDimitry Andric   }
1735ffd83dbSDimitry Andric 
1745ffd83dbSDimitry Andric   void getAnalysisUsage(AnalysisUsage &AU) const override {
1755ffd83dbSDimitry Andric     AU.setPreservesCFG();
176*0fca6ea1SDimitry Andric     AU.addRequired<LoopInfoWrapperPass>();
1775ffd83dbSDimitry Andric     FunctionPass::getAnalysisUsage(AU);
1785ffd83dbSDimitry Andric   }
1795ffd83dbSDimitry Andric };
1805ffd83dbSDimitry Andric 
18106c3fb27SDimitry Andric unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
18206c3fb27SDimitry Andric   if (!TM.getTargetTriple().isAMDGCN())
18306c3fb27SDimitry Andric     return 128;
18406c3fb27SDimitry Andric 
18506c3fb27SDimitry Andric   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
18606c3fb27SDimitry Andric   unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
18706c3fb27SDimitry Andric 
18806c3fb27SDimitry Andric   // A non-entry function has only 32 caller preserved registers.
18906c3fb27SDimitry Andric   // Do not promote alloca which will force spilling unless we know the function
19006c3fb27SDimitry Andric   // will be inlined.
19106c3fb27SDimitry Andric   if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
19206c3fb27SDimitry Andric       !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
19306c3fb27SDimitry Andric     MaxVGPRs = std::min(MaxVGPRs, 32u);
19406c3fb27SDimitry Andric   return MaxVGPRs;
19506c3fb27SDimitry Andric }
19606c3fb27SDimitry Andric 
1970b57cec5SDimitry Andric } // end anonymous namespace
1980b57cec5SDimitry Andric 
1990b57cec5SDimitry Andric char AMDGPUPromoteAlloca::ID = 0;
2005ffd83dbSDimitry Andric char AMDGPUPromoteAllocaToVector::ID = 0;
2010b57cec5SDimitry Andric 
202fe6060f1SDimitry Andric INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
203fe6060f1SDimitry Andric                       "AMDGPU promote alloca to vector or LDS", false, false)
204fe6060f1SDimitry Andric // Move LDS uses from functions to kernels before promote alloca for accurate
205fe6060f1SDimitry Andric // estimation of LDS available
2065f757f3fSDimitry Andric INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
207*0fca6ea1SDimitry Andric INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
208fe6060f1SDimitry Andric INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
2090b57cec5SDimitry Andric                     "AMDGPU promote alloca to vector or LDS", false, false)
2100b57cec5SDimitry Andric 
211*0fca6ea1SDimitry Andric INITIALIZE_PASS_BEGIN(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
212*0fca6ea1SDimitry Andric                       "AMDGPU promote alloca to vector", false, false)
213*0fca6ea1SDimitry Andric INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
214*0fca6ea1SDimitry Andric INITIALIZE_PASS_END(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
2155ffd83dbSDimitry Andric                     "AMDGPU promote alloca to vector", false, false)
2165ffd83dbSDimitry Andric 
2170b57cec5SDimitry Andric char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
2185ffd83dbSDimitry Andric char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
2190b57cec5SDimitry Andric 
220e8d8bef9SDimitry Andric PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
221e8d8bef9SDimitry Andric                                                FunctionAnalysisManager &AM) {
222*0fca6ea1SDimitry Andric   auto &LI = AM.getResult<LoopAnalysis>(F);
223*0fca6ea1SDimitry Andric   bool Changed = AMDGPUPromoteAllocaImpl(TM, LI).run(F, /*PromoteToLDS=*/true);
224e8d8bef9SDimitry Andric   if (Changed) {
225e8d8bef9SDimitry Andric     PreservedAnalyses PA;
226e8d8bef9SDimitry Andric     PA.preserveSet<CFGAnalyses>();
227e8d8bef9SDimitry Andric     return PA;
228e8d8bef9SDimitry Andric   }
229e8d8bef9SDimitry Andric   return PreservedAnalyses::all();
230e8d8bef9SDimitry Andric }
231e8d8bef9SDimitry Andric 
23206c3fb27SDimitry Andric PreservedAnalyses
23306c3fb27SDimitry Andric AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
234*0fca6ea1SDimitry Andric   auto &LI = AM.getResult<LoopAnalysis>(F);
235*0fca6ea1SDimitry Andric   bool Changed = AMDGPUPromoteAllocaImpl(TM, LI).run(F, /*PromoteToLDS=*/false);
23606c3fb27SDimitry Andric   if (Changed) {
23706c3fb27SDimitry Andric     PreservedAnalyses PA;
23806c3fb27SDimitry Andric     PA.preserveSet<CFGAnalyses>();
23906c3fb27SDimitry Andric     return PA;
24006c3fb27SDimitry Andric   }
24106c3fb27SDimitry Andric   return PreservedAnalyses::all();
24206c3fb27SDimitry Andric }
24306c3fb27SDimitry Andric 
24406c3fb27SDimitry Andric FunctionPass *llvm::createAMDGPUPromoteAlloca() {
24506c3fb27SDimitry Andric   return new AMDGPUPromoteAlloca();
24606c3fb27SDimitry Andric }
24706c3fb27SDimitry Andric 
24806c3fb27SDimitry Andric FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
24906c3fb27SDimitry Andric   return new AMDGPUPromoteAllocaToVector();
25006c3fb27SDimitry Andric }
25106c3fb27SDimitry Andric 
252*0fca6ea1SDimitry Andric static void collectAllocaUses(AllocaInst &Alloca,
253*0fca6ea1SDimitry Andric                               SmallVectorImpl<Use *> &Uses) {
254*0fca6ea1SDimitry Andric   SmallVector<Instruction *, 4> WorkList({&Alloca});
255*0fca6ea1SDimitry Andric   while (!WorkList.empty()) {
256*0fca6ea1SDimitry Andric     auto *Cur = WorkList.pop_back_val();
257*0fca6ea1SDimitry Andric     for (auto &U : Cur->uses()) {
258*0fca6ea1SDimitry Andric       Uses.push_back(&U);
259*0fca6ea1SDimitry Andric 
260*0fca6ea1SDimitry Andric       if (isa<GetElementPtrInst>(U.getUser()))
261*0fca6ea1SDimitry Andric         WorkList.push_back(cast<Instruction>(U.getUser()));
262*0fca6ea1SDimitry Andric     }
263*0fca6ea1SDimitry Andric   }
264*0fca6ea1SDimitry Andric }
265*0fca6ea1SDimitry Andric 
266*0fca6ea1SDimitry Andric void AMDGPUPromoteAllocaImpl::sortAllocasToPromote(
267*0fca6ea1SDimitry Andric     SmallVectorImpl<AllocaInst *> &Allocas) {
268*0fca6ea1SDimitry Andric   DenseMap<AllocaInst *, unsigned> Scores;
269*0fca6ea1SDimitry Andric 
270*0fca6ea1SDimitry Andric   for (auto *Alloca : Allocas) {
271*0fca6ea1SDimitry Andric     LLVM_DEBUG(dbgs() << "Scoring: " << *Alloca << "\n");
272*0fca6ea1SDimitry Andric     unsigned &Score = Scores[Alloca];
273*0fca6ea1SDimitry Andric     // Increment score by one for each user + a bonus for users within loops.
274*0fca6ea1SDimitry Andric     SmallVector<Use *, 8> Uses;
275*0fca6ea1SDimitry Andric     collectAllocaUses(*Alloca, Uses);
276*0fca6ea1SDimitry Andric     for (auto *U : Uses) {
277*0fca6ea1SDimitry Andric       Instruction *Inst = cast<Instruction>(U->getUser());
278*0fca6ea1SDimitry Andric       if (isa<GetElementPtrInst>(Inst))
279*0fca6ea1SDimitry Andric         continue;
280*0fca6ea1SDimitry Andric       unsigned UserScore =
281*0fca6ea1SDimitry Andric           1 + (LoopUserWeight * LI.getLoopDepth(Inst->getParent()));
282*0fca6ea1SDimitry Andric       LLVM_DEBUG(dbgs() << "  [+" << UserScore << "]:\t" << *Inst << "\n");
283*0fca6ea1SDimitry Andric       Score += UserScore;
284*0fca6ea1SDimitry Andric     }
285*0fca6ea1SDimitry Andric     LLVM_DEBUG(dbgs() << "  => Final Score:" << Score << "\n");
286*0fca6ea1SDimitry Andric   }
287*0fca6ea1SDimitry Andric 
288*0fca6ea1SDimitry Andric   stable_sort(Allocas, [&](AllocaInst *A, AllocaInst *B) {
289*0fca6ea1SDimitry Andric     return Scores.at(A) > Scores.at(B);
290*0fca6ea1SDimitry Andric   });
291*0fca6ea1SDimitry Andric 
292*0fca6ea1SDimitry Andric   // clang-format off
293*0fca6ea1SDimitry Andric   LLVM_DEBUG(
294*0fca6ea1SDimitry Andric     dbgs() << "Sorted Worklist:\n";
295*0fca6ea1SDimitry Andric     for (auto *A: Allocas)
296*0fca6ea1SDimitry Andric       dbgs() << "  " << *A << "\n";
297*0fca6ea1SDimitry Andric   );
298*0fca6ea1SDimitry Andric   // clang-format on
299*0fca6ea1SDimitry Andric }
300*0fca6ea1SDimitry Andric 
30106c3fb27SDimitry Andric bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
302e8d8bef9SDimitry Andric   Mod = F.getParent();
303e8d8bef9SDimitry Andric   DL = &Mod->getDataLayout();
304e8d8bef9SDimitry Andric 
305e8d8bef9SDimitry Andric   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
3060b57cec5SDimitry Andric   if (!ST.isPromoteAllocaEnabled())
3070b57cec5SDimitry Andric     return false;
3080b57cec5SDimitry Andric 
30906c3fb27SDimitry Andric   MaxVGPRs = getMaxVGPRs(TM, F);
3105ffd83dbSDimitry Andric 
31106c3fb27SDimitry Andric   bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;
3120b57cec5SDimitry Andric 
313*0fca6ea1SDimitry Andric   // Use up to 1/4 of available register budget for vectorization.
314*0fca6ea1SDimitry Andric   // FIXME: Increase the limit for whole function budgets? Perhaps x2?
315*0fca6ea1SDimitry Andric   unsigned VectorizationBudget =
316*0fca6ea1SDimitry Andric       (PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
317*0fca6ea1SDimitry Andric                                   : (MaxVGPRs * 32)) /
318*0fca6ea1SDimitry Andric       4;
319*0fca6ea1SDimitry Andric 
3200b57cec5SDimitry Andric   SmallVector<AllocaInst *, 16> Allocas;
32106c3fb27SDimitry Andric   for (Instruction &I : F.getEntryBlock()) {
32206c3fb27SDimitry Andric     if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
32306c3fb27SDimitry Andric       // Array allocations are probably not worth handling, since an allocation
32406c3fb27SDimitry Andric       // of the array type is the canonical form.
32506c3fb27SDimitry Andric       if (!AI->isStaticAlloca() || AI->isArrayAllocation())
32606c3fb27SDimitry Andric         continue;
3270b57cec5SDimitry Andric       Allocas.push_back(AI);
3280b57cec5SDimitry Andric     }
32906c3fb27SDimitry Andric   }
3300b57cec5SDimitry Andric 
331*0fca6ea1SDimitry Andric   sortAllocasToPromote(Allocas);
332*0fca6ea1SDimitry Andric 
33306c3fb27SDimitry Andric   bool Changed = false;
3340b57cec5SDimitry Andric   for (AllocaInst *AI : Allocas) {
335*0fca6ea1SDimitry Andric     const unsigned AllocaCost = DL->getTypeSizeInBits(AI->getAllocatedType());
336*0fca6ea1SDimitry Andric     // First, check if we have enough budget to vectorize this alloca.
337*0fca6ea1SDimitry Andric     if (AllocaCost <= VectorizationBudget) {
338*0fca6ea1SDimitry Andric       // If we do, attempt vectorization, otherwise, fall through and try
339*0fca6ea1SDimitry Andric       // promoting to LDS instead.
340*0fca6ea1SDimitry Andric       if (tryPromoteAllocaToVector(*AI)) {
34106c3fb27SDimitry Andric         Changed = true;
342*0fca6ea1SDimitry Andric         assert((VectorizationBudget - AllocaCost) < VectorizationBudget &&
343*0fca6ea1SDimitry Andric                "Underflow!");
344*0fca6ea1SDimitry Andric         VectorizationBudget -= AllocaCost;
345*0fca6ea1SDimitry Andric         LLVM_DEBUG(dbgs() << "  Remaining vectorization budget:"
346*0fca6ea1SDimitry Andric                           << VectorizationBudget << "\n");
347*0fca6ea1SDimitry Andric         continue;
348*0fca6ea1SDimitry Andric       }
349*0fca6ea1SDimitry Andric     } else {
350*0fca6ea1SDimitry Andric       LLVM_DEBUG(dbgs() << "Alloca too big for vectorization (size:"
351*0fca6ea1SDimitry Andric                         << AllocaCost << ", budget:" << VectorizationBudget
352*0fca6ea1SDimitry Andric                         << "): " << *AI << "\n");
353*0fca6ea1SDimitry Andric     }
354*0fca6ea1SDimitry Andric 
355*0fca6ea1SDimitry Andric     if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
3560b57cec5SDimitry Andric       Changed = true;
3570b57cec5SDimitry Andric   }
3580b57cec5SDimitry Andric 
35906c3fb27SDimitry Andric   // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
36006c3fb27SDimitry Andric   // dangling pointers. If we want to reuse it past this point, the loop above
36106c3fb27SDimitry Andric   // would need to be updated to remove successfully promoted allocas.
36206c3fb27SDimitry Andric 
3630b57cec5SDimitry Andric   return Changed;
3640b57cec5SDimitry Andric }
3650b57cec5SDimitry Andric 
36606c3fb27SDimitry Andric struct MemTransferInfo {
36706c3fb27SDimitry Andric   ConstantInt *SrcIndex = nullptr;
36806c3fb27SDimitry Andric   ConstantInt *DestIndex = nullptr;
36906c3fb27SDimitry Andric };
37006c3fb27SDimitry Andric 
37106c3fb27SDimitry Andric // Checks if the instruction I is a memset user of the alloca AI that we can
37206c3fb27SDimitry Andric // deal with. Currently, only non-volatile memsets that affect the whole alloca
37306c3fb27SDimitry Andric // are handled.
37406c3fb27SDimitry Andric static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
37506c3fb27SDimitry Andric                               const DataLayout &DL) {
37606c3fb27SDimitry Andric   using namespace PatternMatch;
37706c3fb27SDimitry Andric   // For now we only care about non-volatile memsets that affect the whole type
37806c3fb27SDimitry Andric   // (start at index 0 and fill the whole alloca).
37906c3fb27SDimitry Andric   //
38006c3fb27SDimitry Andric   // TODO: Now that we moved to PromoteAlloca we could handle any memsets
38106c3fb27SDimitry Andric   // (except maybe volatile ones?) - we just need to use shufflevector if it
38206c3fb27SDimitry Andric   // only affects a subset of the vector.
38306c3fb27SDimitry Andric   const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
38406c3fb27SDimitry Andric   return I->getOperand(0) == AI &&
38506c3fb27SDimitry Andric          match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
38606c3fb27SDimitry Andric }
38706c3fb27SDimitry Andric 
38806c3fb27SDimitry Andric static Value *
38906c3fb27SDimitry Andric calculateVectorIndex(Value *Ptr,
39006c3fb27SDimitry Andric                      const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
39106c3fb27SDimitry Andric   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
39206c3fb27SDimitry Andric   if (!GEP)
39306c3fb27SDimitry Andric     return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
39406c3fb27SDimitry Andric 
39506c3fb27SDimitry Andric   auto I = GEPIdx.find(GEP);
39606c3fb27SDimitry Andric   assert(I != GEPIdx.end() && "Must have entry for GEP!");
39706c3fb27SDimitry Andric   return I->second;
39806c3fb27SDimitry Andric }
39906c3fb27SDimitry Andric 
40006c3fb27SDimitry Andric static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
40106c3fb27SDimitry Andric                                Type *VecElemTy, const DataLayout &DL) {
40206c3fb27SDimitry Andric   // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
40306c3fb27SDimitry Andric   // helper.
40406c3fb27SDimitry Andric   unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
40506c3fb27SDimitry Andric   MapVector<Value *, APInt> VarOffsets;
40606c3fb27SDimitry Andric   APInt ConstOffset(BW, 0);
40706c3fb27SDimitry Andric   if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
40806c3fb27SDimitry Andric       !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
40906c3fb27SDimitry Andric     return nullptr;
41006c3fb27SDimitry Andric 
41106c3fb27SDimitry Andric   unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
41206c3fb27SDimitry Andric   if (VarOffsets.size() > 1)
41306c3fb27SDimitry Andric     return nullptr;
41406c3fb27SDimitry Andric 
41506c3fb27SDimitry Andric   if (VarOffsets.size() == 1) {
41606c3fb27SDimitry Andric     // Only handle cases where we don't need to insert extra arithmetic
41706c3fb27SDimitry Andric     // instructions.
41806c3fb27SDimitry Andric     const auto &VarOffset = VarOffsets.front();
41906c3fb27SDimitry Andric     if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
42006c3fb27SDimitry Andric       return nullptr;
42106c3fb27SDimitry Andric     return VarOffset.first;
42206c3fb27SDimitry Andric   }
42306c3fb27SDimitry Andric 
42406c3fb27SDimitry Andric   APInt Quot;
42506c3fb27SDimitry Andric   uint64_t Rem;
42606c3fb27SDimitry Andric   APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
42706c3fb27SDimitry Andric   if (Rem != 0)
42806c3fb27SDimitry Andric     return nullptr;
42906c3fb27SDimitry Andric 
43006c3fb27SDimitry Andric   return ConstantInt::get(GEP->getContext(), Quot);
43106c3fb27SDimitry Andric }
43206c3fb27SDimitry Andric 
43306c3fb27SDimitry Andric /// Promotes a single user of the alloca to a vector form.
43406c3fb27SDimitry Andric ///
43506c3fb27SDimitry Andric /// \param Inst           Instruction to be promoted.
43606c3fb27SDimitry Andric /// \param DL             Module Data Layout.
43706c3fb27SDimitry Andric /// \param VectorTy       Vectorized Type.
43806c3fb27SDimitry Andric /// \param VecStoreSize   Size of \p VectorTy in bytes.
43906c3fb27SDimitry Andric /// \param ElementSize    Size of \p VectorTy element type in bytes.
44006c3fb27SDimitry Andric /// \param TransferInfo   MemTransferInst info map.
44106c3fb27SDimitry Andric /// \param GEPVectorIdx   GEP -> VectorIdx cache.
44206c3fb27SDimitry Andric /// \param CurVal         Current value of the vector (e.g. last stored value)
44306c3fb27SDimitry Andric /// \param[out]  DeferredLoads \p Inst is added to this vector if it can't
44406c3fb27SDimitry Andric ///              be promoted now. This happens when promoting requires \p
44506c3fb27SDimitry Andric ///              CurVal, but \p CurVal is nullptr.
44606c3fb27SDimitry Andric /// \return the stored value if \p Inst would have written to the alloca, or
44706c3fb27SDimitry Andric ///         nullptr otherwise.
44806c3fb27SDimitry Andric static Value *promoteAllocaUserToVector(
44906c3fb27SDimitry Andric     Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
45006c3fb27SDimitry Andric     unsigned VecStoreSize, unsigned ElementSize,
45106c3fb27SDimitry Andric     DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
45206c3fb27SDimitry Andric     std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
45306c3fb27SDimitry Andric     SmallVectorImpl<LoadInst *> &DeferredLoads) {
45406c3fb27SDimitry Andric   // Note: we use InstSimplifyFolder because it can leverage the DataLayout
45506c3fb27SDimitry Andric   // to do more folding, especially in the case of vector splats.
45606c3fb27SDimitry Andric   IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
45706c3fb27SDimitry Andric                                         InstSimplifyFolder(DL));
45806c3fb27SDimitry Andric   Builder.SetInsertPoint(Inst);
45906c3fb27SDimitry Andric 
46006c3fb27SDimitry Andric   const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
46106c3fb27SDimitry Andric     if (CurVal)
46206c3fb27SDimitry Andric       return CurVal;
46306c3fb27SDimitry Andric 
46406c3fb27SDimitry Andric     // If the current value is not known, insert a dummy load and lower it on
46506c3fb27SDimitry Andric     // the second pass.
46606c3fb27SDimitry Andric     LoadInst *Dummy =
46706c3fb27SDimitry Andric         Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
46806c3fb27SDimitry Andric                            "promotealloca.dummyload");
46906c3fb27SDimitry Andric     DeferredLoads.push_back(Dummy);
47006c3fb27SDimitry Andric     return Dummy;
47106c3fb27SDimitry Andric   };
47206c3fb27SDimitry Andric 
47306c3fb27SDimitry Andric   const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
47406c3fb27SDimitry Andric                                                    Type *PtrTy) -> Value * {
47506c3fb27SDimitry Andric     assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
47606c3fb27SDimitry Andric     const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
47706c3fb27SDimitry Andric     if (!PtrTy->isVectorTy())
47806c3fb27SDimitry Andric       return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
47906c3fb27SDimitry Andric     const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
48006c3fb27SDimitry Andric     // If we want to cast to cast, e.g. a <2 x ptr> into a <4 x i32>, we need to
48106c3fb27SDimitry Andric     // first cast the ptr vector to <2 x i64>.
48206c3fb27SDimitry Andric     assert((Size % NumPtrElts == 0) && "Vector size not divisble");
48306c3fb27SDimitry Andric     Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
48406c3fb27SDimitry Andric     return Builder.CreateBitOrPointerCast(
48506c3fb27SDimitry Andric         Val, FixedVectorType::get(EltTy, NumPtrElts));
48606c3fb27SDimitry Andric   };
48706c3fb27SDimitry Andric 
48806c3fb27SDimitry Andric   Type *VecEltTy = VectorTy->getElementType();
4898a4dda33SDimitry Andric 
49006c3fb27SDimitry Andric   switch (Inst->getOpcode()) {
49106c3fb27SDimitry Andric   case Instruction::Load: {
49206c3fb27SDimitry Andric     // Loads can only be lowered if the value is known.
49306c3fb27SDimitry Andric     if (!CurVal) {
49406c3fb27SDimitry Andric       DeferredLoads.push_back(cast<LoadInst>(Inst));
49506c3fb27SDimitry Andric       return nullptr;
49606c3fb27SDimitry Andric     }
49706c3fb27SDimitry Andric 
49806c3fb27SDimitry Andric     Value *Index = calculateVectorIndex(
49906c3fb27SDimitry Andric         cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);
50006c3fb27SDimitry Andric 
50106c3fb27SDimitry Andric     // We're loading the full vector.
50206c3fb27SDimitry Andric     Type *AccessTy = Inst->getType();
50306c3fb27SDimitry Andric     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
504*0fca6ea1SDimitry Andric     if (Constant *CI = dyn_cast<Constant>(Index)) {
505*0fca6ea1SDimitry Andric       if (CI->isZeroValue() && AccessSize == VecStoreSize) {
50606c3fb27SDimitry Andric         if (AccessTy->isPtrOrPtrVectorTy())
50706c3fb27SDimitry Andric           CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
50806c3fb27SDimitry Andric         else if (CurVal->getType()->isPtrOrPtrVectorTy())
50906c3fb27SDimitry Andric           CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
51006c3fb27SDimitry Andric         Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
51106c3fb27SDimitry Andric         Inst->replaceAllUsesWith(NewVal);
51206c3fb27SDimitry Andric         return nullptr;
51306c3fb27SDimitry Andric       }
514*0fca6ea1SDimitry Andric     }
51506c3fb27SDimitry Andric 
51606c3fb27SDimitry Andric     // Loading a subvector.
51706c3fb27SDimitry Andric     if (isa<FixedVectorType>(AccessTy)) {
51806c3fb27SDimitry Andric       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
5198a4dda33SDimitry Andric       const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
5208a4dda33SDimitry Andric       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
52106c3fb27SDimitry Andric       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
52206c3fb27SDimitry Andric 
52306c3fb27SDimitry Andric       Value *SubVec = PoisonValue::get(SubVecTy);
5248a4dda33SDimitry Andric       for (unsigned K = 0; K < NumLoadedElts; ++K) {
5255f757f3fSDimitry Andric         Value *CurIdx =
5265f757f3fSDimitry Andric             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
52706c3fb27SDimitry Andric         SubVec = Builder.CreateInsertElement(
5285f757f3fSDimitry Andric             SubVec, Builder.CreateExtractElement(CurVal, CurIdx), K);
52906c3fb27SDimitry Andric       }
53006c3fb27SDimitry Andric 
53106c3fb27SDimitry Andric       if (AccessTy->isPtrOrPtrVectorTy())
53206c3fb27SDimitry Andric         SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
53306c3fb27SDimitry Andric       else if (SubVecTy->isPtrOrPtrVectorTy())
53406c3fb27SDimitry Andric         SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);
53506c3fb27SDimitry Andric 
53606c3fb27SDimitry Andric       SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
53706c3fb27SDimitry Andric       Inst->replaceAllUsesWith(SubVec);
53806c3fb27SDimitry Andric       return nullptr;
53906c3fb27SDimitry Andric     }
54006c3fb27SDimitry Andric 
54106c3fb27SDimitry Andric     // We're loading one element.
54206c3fb27SDimitry Andric     Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
54306c3fb27SDimitry Andric     if (AccessTy != VecEltTy)
54406c3fb27SDimitry Andric       ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);
54506c3fb27SDimitry Andric 
54606c3fb27SDimitry Andric     Inst->replaceAllUsesWith(ExtractElement);
54706c3fb27SDimitry Andric     return nullptr;
54806c3fb27SDimitry Andric   }
54906c3fb27SDimitry Andric   case Instruction::Store: {
55006c3fb27SDimitry Andric     // For stores, it's a bit trickier and it depends on whether we're storing
55106c3fb27SDimitry Andric     // the full vector or not. If we're storing the full vector, we don't need
55206c3fb27SDimitry Andric     // to know the current value. If this is a store of a single element, we
55306c3fb27SDimitry Andric     // need to know the value.
55406c3fb27SDimitry Andric     StoreInst *SI = cast<StoreInst>(Inst);
55506c3fb27SDimitry Andric     Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
55606c3fb27SDimitry Andric     Value *Val = SI->getValueOperand();
55706c3fb27SDimitry Andric 
55806c3fb27SDimitry Andric     // We're storing the full vector, we can handle this without knowing CurVal.
55906c3fb27SDimitry Andric     Type *AccessTy = Val->getType();
56006c3fb27SDimitry Andric     TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
561*0fca6ea1SDimitry Andric     if (Constant *CI = dyn_cast<Constant>(Index)) {
562*0fca6ea1SDimitry Andric       if (CI->isZeroValue() && AccessSize == VecStoreSize) {
56306c3fb27SDimitry Andric         if (AccessTy->isPtrOrPtrVectorTy())
56406c3fb27SDimitry Andric           Val = CreateTempPtrIntCast(Val, AccessTy);
56506c3fb27SDimitry Andric         else if (VectorTy->isPtrOrPtrVectorTy())
56606c3fb27SDimitry Andric           Val = CreateTempPtrIntCast(Val, VectorTy);
56706c3fb27SDimitry Andric         return Builder.CreateBitOrPointerCast(Val, VectorTy);
56806c3fb27SDimitry Andric       }
569*0fca6ea1SDimitry Andric     }
57006c3fb27SDimitry Andric 
57106c3fb27SDimitry Andric     // Storing a subvector.
57206c3fb27SDimitry Andric     if (isa<FixedVectorType>(AccessTy)) {
57306c3fb27SDimitry Andric       assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
5748a4dda33SDimitry Andric       const unsigned NumWrittenElts =
5758a4dda33SDimitry Andric           AccessSize / DL.getTypeStoreSize(VecEltTy);
5765f757f3fSDimitry Andric       const unsigned NumVecElts = VectorTy->getNumElements();
5778a4dda33SDimitry Andric       auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
57806c3fb27SDimitry Andric       assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
57906c3fb27SDimitry Andric 
58006c3fb27SDimitry Andric       if (SubVecTy->isPtrOrPtrVectorTy())
58106c3fb27SDimitry Andric         Val = CreateTempPtrIntCast(Val, SubVecTy);
58206c3fb27SDimitry Andric       else if (AccessTy->isPtrOrPtrVectorTy())
58306c3fb27SDimitry Andric         Val = CreateTempPtrIntCast(Val, AccessTy);
58406c3fb27SDimitry Andric 
58506c3fb27SDimitry Andric       Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);
58606c3fb27SDimitry Andric 
58706c3fb27SDimitry Andric       Value *CurVec = GetOrLoadCurrentVectorValue();
5885f757f3fSDimitry Andric       for (unsigned K = 0, NumElts = std::min(NumWrittenElts, NumVecElts);
5895f757f3fSDimitry Andric            K < NumElts; ++K) {
5905f757f3fSDimitry Andric         Value *CurIdx =
5915f757f3fSDimitry Andric             Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
59206c3fb27SDimitry Andric         CurVec = Builder.CreateInsertElement(
5935f757f3fSDimitry Andric             CurVec, Builder.CreateExtractElement(Val, K), CurIdx);
59406c3fb27SDimitry Andric       }
59506c3fb27SDimitry Andric       return CurVec;
59606c3fb27SDimitry Andric     }
59706c3fb27SDimitry Andric 
59806c3fb27SDimitry Andric     if (Val->getType() != VecEltTy)
59906c3fb27SDimitry Andric       Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
60006c3fb27SDimitry Andric     return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
60106c3fb27SDimitry Andric                                        Index);
60206c3fb27SDimitry Andric   }
60306c3fb27SDimitry Andric   case Instruction::Call: {
60406c3fb27SDimitry Andric     if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
60506c3fb27SDimitry Andric       // For memcpy, we need to know curval.
60606c3fb27SDimitry Andric       ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
60706c3fb27SDimitry Andric       unsigned NumCopied = Length->getZExtValue() / ElementSize;
60806c3fb27SDimitry Andric       MemTransferInfo *TI = &TransferInfo[MTI];
60906c3fb27SDimitry Andric       unsigned SrcBegin = TI->SrcIndex->getZExtValue();
61006c3fb27SDimitry Andric       unsigned DestBegin = TI->DestIndex->getZExtValue();
61106c3fb27SDimitry Andric 
61206c3fb27SDimitry Andric       SmallVector<int> Mask;
61306c3fb27SDimitry Andric       for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
61406c3fb27SDimitry Andric         if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
61506c3fb27SDimitry Andric           Mask.push_back(SrcBegin++);
61606c3fb27SDimitry Andric         } else {
61706c3fb27SDimitry Andric           Mask.push_back(Idx);
61806c3fb27SDimitry Andric         }
61906c3fb27SDimitry Andric       }
62006c3fb27SDimitry Andric 
62106c3fb27SDimitry Andric       return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
62206c3fb27SDimitry Andric     }
62306c3fb27SDimitry Andric 
62406c3fb27SDimitry Andric     if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
62506c3fb27SDimitry Andric       // For memset, we don't need to know the previous value because we
62606c3fb27SDimitry Andric       // currently only allow memsets that cover the whole alloca.
62706c3fb27SDimitry Andric       Value *Elt = MSI->getOperand(1);
628b3edf446SDimitry Andric       const unsigned BytesPerElt = DL.getTypeStoreSize(VecEltTy);
629b3edf446SDimitry Andric       if (BytesPerElt > 1) {
630b3edf446SDimitry Andric         Value *EltBytes = Builder.CreateVectorSplat(BytesPerElt, Elt);
631b3edf446SDimitry Andric 
632b3edf446SDimitry Andric         // If the element type of the vector is a pointer, we need to first cast
633b3edf446SDimitry Andric         // to an integer, then use a PtrCast.
634b3edf446SDimitry Andric         if (VecEltTy->isPointerTy()) {
635b3edf446SDimitry Andric           Type *PtrInt = Builder.getIntNTy(BytesPerElt * 8);
636b3edf446SDimitry Andric           Elt = Builder.CreateBitCast(EltBytes, PtrInt);
637b3edf446SDimitry Andric           Elt = Builder.CreateIntToPtr(Elt, VecEltTy);
638b3edf446SDimitry Andric         } else
63906c3fb27SDimitry Andric           Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
64006c3fb27SDimitry Andric       }
64106c3fb27SDimitry Andric 
64206c3fb27SDimitry Andric       return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
64306c3fb27SDimitry Andric     }
64406c3fb27SDimitry Andric 
645cb14a3feSDimitry Andric     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
646cb14a3feSDimitry Andric       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
647cb14a3feSDimitry Andric         Intr->replaceAllUsesWith(
648cb14a3feSDimitry Andric             Builder.getIntN(Intr->getType()->getIntegerBitWidth(),
649cb14a3feSDimitry Andric                             DL.getTypeAllocSize(VectorTy)));
650cb14a3feSDimitry Andric         return nullptr;
651cb14a3feSDimitry Andric       }
652cb14a3feSDimitry Andric     }
653cb14a3feSDimitry Andric 
65406c3fb27SDimitry Andric     llvm_unreachable("Unsupported call when promoting alloca to vector");
65506c3fb27SDimitry Andric   }
65606c3fb27SDimitry Andric 
65706c3fb27SDimitry Andric   default:
65806c3fb27SDimitry Andric     llvm_unreachable("Inconsistency in instructions promotable to vector");
65906c3fb27SDimitry Andric   }
66006c3fb27SDimitry Andric 
66106c3fb27SDimitry Andric   llvm_unreachable("Did not return after promoting instruction!");
66206c3fb27SDimitry Andric }
66306c3fb27SDimitry Andric 
66406c3fb27SDimitry Andric static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
66506c3fb27SDimitry Andric                                   const DataLayout &DL) {
66606c3fb27SDimitry Andric   // Access as a vector type can work if the size of the access vector is a
66706c3fb27SDimitry Andric   // multiple of the size of the alloca's vector element type.
66806c3fb27SDimitry Andric   //
66906c3fb27SDimitry Andric   // Examples:
67006c3fb27SDimitry Andric   //    - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
67106c3fb27SDimitry Andric   //    - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
67206c3fb27SDimitry Andric   //    - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
67306c3fb27SDimitry Andric   //        - 3*32 is not a multiple of 64
67406c3fb27SDimitry Andric   //
67506c3fb27SDimitry Andric   // We could handle more complicated cases, but it'd make things a lot more
67606c3fb27SDimitry Andric   // complicated.
67706c3fb27SDimitry Andric   if (isa<FixedVectorType>(AccessTy)) {
67806c3fb27SDimitry Andric     TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
67906c3fb27SDimitry Andric     TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
68006c3fb27SDimitry Andric     return AccTS.isKnownMultipleOf(VecTS);
68106c3fb27SDimitry Andric   }
68206c3fb27SDimitry Andric 
68306c3fb27SDimitry Andric   return CastInst::isBitOrNoopPointerCastable(VecTy->getElementType(), AccessTy,
68406c3fb27SDimitry Andric                                               DL);
68506c3fb27SDimitry Andric }
68606c3fb27SDimitry Andric 
68706c3fb27SDimitry Andric /// Iterates over an instruction worklist that may contain multiple instructions
68806c3fb27SDimitry Andric /// from the same basic block, but in a different order.
68906c3fb27SDimitry Andric template <typename InstContainer>
69006c3fb27SDimitry Andric static void forEachWorkListItem(const InstContainer &WorkList,
69106c3fb27SDimitry Andric                                 std::function<void(Instruction *)> Fn) {
69206c3fb27SDimitry Andric   // Bucket up uses of the alloca by the block they occur in.
69306c3fb27SDimitry Andric   // This is important because we have to handle multiple defs/uses in a block
69406c3fb27SDimitry Andric   // ourselves: SSAUpdater is purely for cross-block references.
69506c3fb27SDimitry Andric   DenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
69606c3fb27SDimitry Andric   for (Instruction *User : WorkList)
69706c3fb27SDimitry Andric     UsesByBlock[User->getParent()].insert(User);
69806c3fb27SDimitry Andric 
69906c3fb27SDimitry Andric   for (Instruction *User : WorkList) {
70006c3fb27SDimitry Andric     BasicBlock *BB = User->getParent();
70106c3fb27SDimitry Andric     auto &BlockUses = UsesByBlock[BB];
70206c3fb27SDimitry Andric 
70306c3fb27SDimitry Andric     // Already processed, skip.
70406c3fb27SDimitry Andric     if (BlockUses.empty())
70506c3fb27SDimitry Andric       continue;
70606c3fb27SDimitry Andric 
70706c3fb27SDimitry Andric     // Only user in the block, directly process it.
70806c3fb27SDimitry Andric     if (BlockUses.size() == 1) {
70906c3fb27SDimitry Andric       Fn(User);
71006c3fb27SDimitry Andric       continue;
71106c3fb27SDimitry Andric     }
71206c3fb27SDimitry Andric 
71306c3fb27SDimitry Andric     // Multiple users in the block, do a linear scan to see users in order.
71406c3fb27SDimitry Andric     for (Instruction &Inst : *BB) {
71506c3fb27SDimitry Andric       if (!BlockUses.contains(&Inst))
71606c3fb27SDimitry Andric         continue;
71706c3fb27SDimitry Andric 
71806c3fb27SDimitry Andric       Fn(&Inst);
71906c3fb27SDimitry Andric     }
72006c3fb27SDimitry Andric 
72106c3fb27SDimitry Andric     // Clear the block so we know it's been processed.
72206c3fb27SDimitry Andric     BlockUses.clear();
72306c3fb27SDimitry Andric   }
72406c3fb27SDimitry Andric }
72506c3fb27SDimitry Andric 
72606c3fb27SDimitry Andric // FIXME: Should try to pick the most likely to be profitable allocas first.
72706c3fb27SDimitry Andric bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
72806c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
72906c3fb27SDimitry Andric 
73006c3fb27SDimitry Andric   if (DisablePromoteAllocaToVector) {
73106c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
73206c3fb27SDimitry Andric     return false;
73306c3fb27SDimitry Andric   }
73406c3fb27SDimitry Andric 
73506c3fb27SDimitry Andric   Type *AllocaTy = Alloca.getAllocatedType();
73606c3fb27SDimitry Andric   auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
73706c3fb27SDimitry Andric   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
73806c3fb27SDimitry Andric     if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
73906c3fb27SDimitry Andric         ArrayTy->getNumElements() > 0)
74006c3fb27SDimitry Andric       VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
74106c3fb27SDimitry Andric                                       ArrayTy->getNumElements());
74206c3fb27SDimitry Andric   }
74306c3fb27SDimitry Andric 
74406c3fb27SDimitry Andric   // FIXME: There is no reason why we can't support larger arrays, we
74506c3fb27SDimitry Andric   // are just being conservative for now.
74606c3fb27SDimitry Andric   // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or
74706c3fb27SDimitry Andric   // equivalent. Potentially these could also be promoted but we don't currently
74806c3fb27SDimitry Andric   // handle this case
74906c3fb27SDimitry Andric   if (!VectorTy) {
75006c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
75106c3fb27SDimitry Andric     return false;
75206c3fb27SDimitry Andric   }
75306c3fb27SDimitry Andric 
75406c3fb27SDimitry Andric   if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
75506c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  " << *VectorTy
75606c3fb27SDimitry Andric                       << " has an unsupported number of elements\n");
75706c3fb27SDimitry Andric     return false;
75806c3fb27SDimitry Andric   }
75906c3fb27SDimitry Andric 
76006c3fb27SDimitry Andric   std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
76106c3fb27SDimitry Andric   SmallVector<Instruction *> WorkList;
76206c3fb27SDimitry Andric   SmallVector<Instruction *> UsersToRemove;
76306c3fb27SDimitry Andric   SmallVector<Instruction *> DeferredInsts;
76406c3fb27SDimitry Andric   DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;
76506c3fb27SDimitry Andric 
76606c3fb27SDimitry Andric   const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
76706c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  Cannot promote alloca to vector: " << Msg << "\n"
76806c3fb27SDimitry Andric                       << "    " << *Inst << "\n");
76906c3fb27SDimitry Andric     return false;
77006c3fb27SDimitry Andric   };
77106c3fb27SDimitry Andric 
772*0fca6ea1SDimitry Andric   SmallVector<Use *, 8> Uses;
773*0fca6ea1SDimitry Andric   collectAllocaUses(Alloca, Uses);
77406c3fb27SDimitry Andric 
77506c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");
77606c3fb27SDimitry Andric 
77706c3fb27SDimitry Andric   Type *VecEltTy = VectorTy->getElementType();
77806c3fb27SDimitry Andric   unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
779*0fca6ea1SDimitry Andric   for (auto *U : Uses) {
78006c3fb27SDimitry Andric     Instruction *Inst = cast<Instruction>(U->getUser());
78106c3fb27SDimitry Andric 
78206c3fb27SDimitry Andric     if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
78306c3fb27SDimitry Andric       // This is a store of the pointer, not to the pointer.
78406c3fb27SDimitry Andric       if (isa<StoreInst>(Inst) &&
78506c3fb27SDimitry Andric           U->getOperandNo() != StoreInst::getPointerOperandIndex())
78606c3fb27SDimitry Andric         return RejectUser(Inst, "pointer is being stored");
78706c3fb27SDimitry Andric 
78806c3fb27SDimitry Andric       Type *AccessTy = getLoadStoreType(Inst);
78906c3fb27SDimitry Andric       if (AccessTy->isAggregateType())
79006c3fb27SDimitry Andric         return RejectUser(Inst, "unsupported load/store as aggregate");
79106c3fb27SDimitry Andric       assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());
79206c3fb27SDimitry Andric 
7935f757f3fSDimitry Andric       // Check that this is a simple access of a vector element.
7945f757f3fSDimitry Andric       bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
7955f757f3fSDimitry Andric                                           : cast<StoreInst>(Inst)->isSimple();
7965f757f3fSDimitry Andric       if (!IsSimple)
7975f757f3fSDimitry Andric         return RejectUser(Inst, "not a simple load or store");
7985f757f3fSDimitry Andric 
79906c3fb27SDimitry Andric       Ptr = Ptr->stripPointerCasts();
80006c3fb27SDimitry Andric 
80106c3fb27SDimitry Andric       // Alloca already accessed as vector.
80206c3fb27SDimitry Andric       if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
80306c3fb27SDimitry Andric                                 DL->getTypeStoreSize(AccessTy)) {
80406c3fb27SDimitry Andric         WorkList.push_back(Inst);
80506c3fb27SDimitry Andric         continue;
80606c3fb27SDimitry Andric       }
80706c3fb27SDimitry Andric 
80806c3fb27SDimitry Andric       if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
80906c3fb27SDimitry Andric         return RejectUser(Inst, "not a supported access type");
81006c3fb27SDimitry Andric 
81106c3fb27SDimitry Andric       WorkList.push_back(Inst);
81206c3fb27SDimitry Andric       continue;
81306c3fb27SDimitry Andric     }
81406c3fb27SDimitry Andric 
81506c3fb27SDimitry Andric     if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
81606c3fb27SDimitry Andric       // If we can't compute a vector index from this GEP, then we can't
81706c3fb27SDimitry Andric       // promote this alloca to vector.
81806c3fb27SDimitry Andric       Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
81906c3fb27SDimitry Andric       if (!Index)
82006c3fb27SDimitry Andric         return RejectUser(Inst, "cannot compute vector index for GEP");
82106c3fb27SDimitry Andric 
82206c3fb27SDimitry Andric       GEPVectorIdx[GEP] = Index;
82306c3fb27SDimitry Andric       UsersToRemove.push_back(Inst);
82406c3fb27SDimitry Andric       continue;
82506c3fb27SDimitry Andric     }
82606c3fb27SDimitry Andric 
82706c3fb27SDimitry Andric     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
82806c3fb27SDimitry Andric         MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
82906c3fb27SDimitry Andric       WorkList.push_back(Inst);
83006c3fb27SDimitry Andric       continue;
83106c3fb27SDimitry Andric     }
83206c3fb27SDimitry Andric 
83306c3fb27SDimitry Andric     if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
83406c3fb27SDimitry Andric       if (TransferInst->isVolatile())
83506c3fb27SDimitry Andric         return RejectUser(Inst, "mem transfer inst is volatile");
83606c3fb27SDimitry Andric 
83706c3fb27SDimitry Andric       ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
83806c3fb27SDimitry Andric       if (!Len || (Len->getZExtValue() % ElementSize))
83906c3fb27SDimitry Andric         return RejectUser(Inst, "mem transfer inst length is non-constant or "
84006c3fb27SDimitry Andric                                 "not a multiple of the vector element size");
84106c3fb27SDimitry Andric 
84206c3fb27SDimitry Andric       if (!TransferInfo.count(TransferInst)) {
84306c3fb27SDimitry Andric         DeferredInsts.push_back(Inst);
84406c3fb27SDimitry Andric         WorkList.push_back(Inst);
84506c3fb27SDimitry Andric         TransferInfo[TransferInst] = MemTransferInfo();
84606c3fb27SDimitry Andric       }
84706c3fb27SDimitry Andric 
84806c3fb27SDimitry Andric       auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
84906c3fb27SDimitry Andric         GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
85006c3fb27SDimitry Andric         if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
85106c3fb27SDimitry Andric           return nullptr;
85206c3fb27SDimitry Andric 
85306c3fb27SDimitry Andric         return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
85406c3fb27SDimitry Andric       };
85506c3fb27SDimitry Andric 
85606c3fb27SDimitry Andric       unsigned OpNum = U->getOperandNo();
85706c3fb27SDimitry Andric       MemTransferInfo *TI = &TransferInfo[TransferInst];
85806c3fb27SDimitry Andric       if (OpNum == 0) {
85906c3fb27SDimitry Andric         Value *Dest = TransferInst->getDest();
86006c3fb27SDimitry Andric         ConstantInt *Index = getPointerIndexOfAlloca(Dest);
86106c3fb27SDimitry Andric         if (!Index)
86206c3fb27SDimitry Andric           return RejectUser(Inst, "could not calculate constant dest index");
86306c3fb27SDimitry Andric         TI->DestIndex = Index;
86406c3fb27SDimitry Andric       } else {
86506c3fb27SDimitry Andric         assert(OpNum == 1);
86606c3fb27SDimitry Andric         Value *Src = TransferInst->getSource();
86706c3fb27SDimitry Andric         ConstantInt *Index = getPointerIndexOfAlloca(Src);
86806c3fb27SDimitry Andric         if (!Index)
86906c3fb27SDimitry Andric           return RejectUser(Inst, "could not calculate constant src index");
87006c3fb27SDimitry Andric         TI->SrcIndex = Index;
87106c3fb27SDimitry Andric       }
87206c3fb27SDimitry Andric       continue;
87306c3fb27SDimitry Andric     }
87406c3fb27SDimitry Andric 
875cb14a3feSDimitry Andric     if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
876cb14a3feSDimitry Andric       if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
877cb14a3feSDimitry Andric         WorkList.push_back(Inst);
878cb14a3feSDimitry Andric         continue;
879cb14a3feSDimitry Andric       }
880cb14a3feSDimitry Andric     }
881cb14a3feSDimitry Andric 
88206c3fb27SDimitry Andric     // Ignore assume-like intrinsics and comparisons used in assumes.
88306c3fb27SDimitry Andric     if (isAssumeLikeIntrinsic(Inst)) {
884cb14a3feSDimitry Andric       if (!Inst->use_empty())
885cb14a3feSDimitry Andric         return RejectUser(Inst, "assume-like intrinsic cannot have any users");
88606c3fb27SDimitry Andric       UsersToRemove.push_back(Inst);
88706c3fb27SDimitry Andric       continue;
88806c3fb27SDimitry Andric     }
88906c3fb27SDimitry Andric 
89006c3fb27SDimitry Andric     if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
89106c3fb27SDimitry Andric           return isAssumeLikeIntrinsic(cast<Instruction>(U));
89206c3fb27SDimitry Andric         })) {
89306c3fb27SDimitry Andric       UsersToRemove.push_back(Inst);
89406c3fb27SDimitry Andric       continue;
89506c3fb27SDimitry Andric     }
89606c3fb27SDimitry Andric 
89706c3fb27SDimitry Andric     return RejectUser(Inst, "unhandled alloca user");
89806c3fb27SDimitry Andric   }
89906c3fb27SDimitry Andric 
90006c3fb27SDimitry Andric   while (!DeferredInsts.empty()) {
90106c3fb27SDimitry Andric     Instruction *Inst = DeferredInsts.pop_back_val();
90206c3fb27SDimitry Andric     MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
90306c3fb27SDimitry Andric     // TODO: Support the case if the pointers are from different alloca or
90406c3fb27SDimitry Andric     // from different address spaces.
90506c3fb27SDimitry Andric     MemTransferInfo &Info = TransferInfo[TransferInst];
90606c3fb27SDimitry Andric     if (!Info.SrcIndex || !Info.DestIndex)
90706c3fb27SDimitry Andric       return RejectUser(
90806c3fb27SDimitry Andric           Inst, "mem transfer inst is missing constant src and/or dst index");
90906c3fb27SDimitry Andric   }
91006c3fb27SDimitry Andric 
91106c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
91206c3fb27SDimitry Andric                     << *VectorTy << '\n');
91306c3fb27SDimitry Andric   const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);
91406c3fb27SDimitry Andric 
91506c3fb27SDimitry Andric   // Alloca is uninitialized memory. Imitate that by making the first value
91606c3fb27SDimitry Andric   // undef.
91706c3fb27SDimitry Andric   SSAUpdater Updater;
91806c3fb27SDimitry Andric   Updater.Initialize(VectorTy, "promotealloca");
91906c3fb27SDimitry Andric   Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));
92006c3fb27SDimitry Andric 
92106c3fb27SDimitry Andric   // First handle the initial worklist.
92206c3fb27SDimitry Andric   SmallVector<LoadInst *, 4> DeferredLoads;
92306c3fb27SDimitry Andric   forEachWorkListItem(WorkList, [&](Instruction *I) {
92406c3fb27SDimitry Andric     BasicBlock *BB = I->getParent();
92506c3fb27SDimitry Andric     // On the first pass, we only take values that are trivially known, i.e.
92606c3fb27SDimitry Andric     // where AddAvailableValue was already called in this block.
92706c3fb27SDimitry Andric     Value *Result = promoteAllocaUserToVector(
92806c3fb27SDimitry Andric         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
92906c3fb27SDimitry Andric         Updater.FindValueForBlock(BB), DeferredLoads);
93006c3fb27SDimitry Andric     if (Result)
93106c3fb27SDimitry Andric       Updater.AddAvailableValue(BB, Result);
93206c3fb27SDimitry Andric   });
93306c3fb27SDimitry Andric 
93406c3fb27SDimitry Andric   // Then handle deferred loads.
93506c3fb27SDimitry Andric   forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
93606c3fb27SDimitry Andric     SmallVector<LoadInst *, 0> NewDLs;
93706c3fb27SDimitry Andric     BasicBlock *BB = I->getParent();
93806c3fb27SDimitry Andric     // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
93906c3fb27SDimitry Andric     // get a value, inserting PHIs as needed.
94006c3fb27SDimitry Andric     Value *Result = promoteAllocaUserToVector(
94106c3fb27SDimitry Andric         I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
94206c3fb27SDimitry Andric         Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
94306c3fb27SDimitry Andric     if (Result)
94406c3fb27SDimitry Andric       Updater.AddAvailableValue(BB, Result);
94506c3fb27SDimitry Andric     assert(NewDLs.empty() && "No more deferred loads should be queued!");
94606c3fb27SDimitry Andric   });
94706c3fb27SDimitry Andric 
94806c3fb27SDimitry Andric   // Delete all instructions. On the first pass, new dummy loads may have been
94906c3fb27SDimitry Andric   // added so we need to collect them too.
95006c3fb27SDimitry Andric   DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
95106c3fb27SDimitry Andric   InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
95206c3fb27SDimitry Andric   for (Instruction *I : InstsToDelete) {
95306c3fb27SDimitry Andric     assert(I->use_empty());
95406c3fb27SDimitry Andric     I->eraseFromParent();
95506c3fb27SDimitry Andric   }
95606c3fb27SDimitry Andric 
95706c3fb27SDimitry Andric   // Delete all the users that are known to be removeable.
95806c3fb27SDimitry Andric   for (Instruction *I : reverse(UsersToRemove)) {
95906c3fb27SDimitry Andric     I->dropDroppableUses();
96006c3fb27SDimitry Andric     assert(I->use_empty());
96106c3fb27SDimitry Andric     I->eraseFromParent();
96206c3fb27SDimitry Andric   }
96306c3fb27SDimitry Andric 
96406c3fb27SDimitry Andric   // Alloca should now be dead too.
96506c3fb27SDimitry Andric   assert(Alloca.use_empty());
96606c3fb27SDimitry Andric   Alloca.eraseFromParent();
96706c3fb27SDimitry Andric   return true;
96806c3fb27SDimitry Andric }
96906c3fb27SDimitry Andric 
9700b57cec5SDimitry Andric std::pair<Value *, Value *>
971e8d8bef9SDimitry Andric AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
972349cc55cSDimitry Andric   Function &F = *Builder.GetInsertBlock()->getParent();
973e8d8bef9SDimitry Andric   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
9740b57cec5SDimitry Andric 
9750b57cec5SDimitry Andric   if (!IsAMDHSA) {
97606c3fb27SDimitry Andric     Function *LocalSizeYFn =
97706c3fb27SDimitry Andric         Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
97806c3fb27SDimitry Andric     Function *LocalSizeZFn =
97906c3fb27SDimitry Andric         Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
9800b57cec5SDimitry Andric 
9810b57cec5SDimitry Andric     CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
9820b57cec5SDimitry Andric     CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
9830b57cec5SDimitry Andric 
9840b57cec5SDimitry Andric     ST.makeLIDRangeMetadata(LocalSizeY);
9850b57cec5SDimitry Andric     ST.makeLIDRangeMetadata(LocalSizeZ);
9860b57cec5SDimitry Andric 
987bdd1243dSDimitry Andric     return std::pair(LocalSizeY, LocalSizeZ);
9880b57cec5SDimitry Andric   }
9890b57cec5SDimitry Andric 
9900b57cec5SDimitry Andric   // We must read the size out of the dispatch pointer.
9910b57cec5SDimitry Andric   assert(IsAMDGCN);
9920b57cec5SDimitry Andric 
9930b57cec5SDimitry Andric   // We are indexing into this struct, and want to extract the workgroup_size_*
9940b57cec5SDimitry Andric   // fields.
9950b57cec5SDimitry Andric   //
9960b57cec5SDimitry Andric   //   typedef struct hsa_kernel_dispatch_packet_s {
9970b57cec5SDimitry Andric   //     uint16_t header;
9980b57cec5SDimitry Andric   //     uint16_t setup;
9990b57cec5SDimitry Andric   //     uint16_t workgroup_size_x ;
10000b57cec5SDimitry Andric   //     uint16_t workgroup_size_y;
10010b57cec5SDimitry Andric   //     uint16_t workgroup_size_z;
10020b57cec5SDimitry Andric   //     uint16_t reserved0;
10030b57cec5SDimitry Andric   //     uint32_t grid_size_x ;
10040b57cec5SDimitry Andric   //     uint32_t grid_size_y ;
10050b57cec5SDimitry Andric   //     uint32_t grid_size_z;
10060b57cec5SDimitry Andric   //
10070b57cec5SDimitry Andric   //     uint32_t private_segment_size;
10080b57cec5SDimitry Andric   //     uint32_t group_segment_size;
10090b57cec5SDimitry Andric   //     uint64_t kernel_object;
10100b57cec5SDimitry Andric   //
10110b57cec5SDimitry Andric   // #ifdef HSA_LARGE_MODEL
10120b57cec5SDimitry Andric   //     void *kernarg_address;
10130b57cec5SDimitry Andric   // #elif defined HSA_LITTLE_ENDIAN
10140b57cec5SDimitry Andric   //     void *kernarg_address;
10150b57cec5SDimitry Andric   //     uint32_t reserved1;
10160b57cec5SDimitry Andric   // #else
10170b57cec5SDimitry Andric   //     uint32_t reserved1;
10180b57cec5SDimitry Andric   //     void *kernarg_address;
10190b57cec5SDimitry Andric   // #endif
10200b57cec5SDimitry Andric   //     uint64_t reserved2;
10210b57cec5SDimitry Andric   //     hsa_signal_t completion_signal; // uint64_t wrapper
10220b57cec5SDimitry Andric   //   } hsa_kernel_dispatch_packet_t
10230b57cec5SDimitry Andric   //
102406c3fb27SDimitry Andric   Function *DispatchPtrFn =
102506c3fb27SDimitry Andric       Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
10260b57cec5SDimitry Andric 
10270b57cec5SDimitry Andric   CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
1028349cc55cSDimitry Andric   DispatchPtr->addRetAttr(Attribute::NoAlias);
1029349cc55cSDimitry Andric   DispatchPtr->addRetAttr(Attribute::NonNull);
1030349cc55cSDimitry Andric   F.removeFnAttr("amdgpu-no-dispatch-ptr");
10310b57cec5SDimitry Andric 
10320b57cec5SDimitry Andric   // Size of the dispatch packet struct.
1033349cc55cSDimitry Andric   DispatchPtr->addDereferenceableRetAttr(64);
10340b57cec5SDimitry Andric 
10350b57cec5SDimitry Andric   Type *I32Ty = Type::getInt32Ty(Mod->getContext());
10360b57cec5SDimitry Andric   Value *CastDispatchPtr = Builder.CreateBitCast(
10370b57cec5SDimitry Andric       DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
10380b57cec5SDimitry Andric 
10390b57cec5SDimitry Andric   // We could do a single 64-bit load here, but it's likely that the basic
10400b57cec5SDimitry Andric   // 32-bit and extract sequence is already present, and it is probably easier
1041349cc55cSDimitry Andric   // to CSE this. The loads should be mergeable later anyway.
10420b57cec5SDimitry Andric   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
10435ffd83dbSDimitry Andric   LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
10440b57cec5SDimitry Andric 
10450b57cec5SDimitry Andric   Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
10465ffd83dbSDimitry Andric   LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
10470b57cec5SDimitry Andric 
1048bdd1243dSDimitry Andric   MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
10490b57cec5SDimitry Andric   LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
10500b57cec5SDimitry Andric   LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
10510b57cec5SDimitry Andric   ST.makeLIDRangeMetadata(LoadZU);
10520b57cec5SDimitry Andric 
10530b57cec5SDimitry Andric   // Extract y component. Upper half of LoadZU should be zero already.
10540b57cec5SDimitry Andric   Value *Y = Builder.CreateLShr(LoadXY, 16);
10550b57cec5SDimitry Andric 
1056bdd1243dSDimitry Andric   return std::pair(Y, LoadZU);
10570b57cec5SDimitry Andric }
10580b57cec5SDimitry Andric 
1059e8d8bef9SDimitry Andric Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
1060e8d8bef9SDimitry Andric                                               unsigned N) {
1061349cc55cSDimitry Andric   Function *F = Builder.GetInsertBlock()->getParent();
1062349cc55cSDimitry Andric   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
1063480093f4SDimitry Andric   Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
1064349cc55cSDimitry Andric   StringRef AttrName;
10650b57cec5SDimitry Andric 
10660b57cec5SDimitry Andric   switch (N) {
10670b57cec5SDimitry Andric   case 0:
1068480093f4SDimitry Andric     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
1069480093f4SDimitry Andric                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
1070349cc55cSDimitry Andric     AttrName = "amdgpu-no-workitem-id-x";
10710b57cec5SDimitry Andric     break;
10720b57cec5SDimitry Andric   case 1:
1073480093f4SDimitry Andric     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
1074480093f4SDimitry Andric                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
1075349cc55cSDimitry Andric     AttrName = "amdgpu-no-workitem-id-y";
10760b57cec5SDimitry Andric     break;
10770b57cec5SDimitry Andric 
10780b57cec5SDimitry Andric   case 2:
1079480093f4SDimitry Andric     IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
1080480093f4SDimitry Andric                       : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
1081349cc55cSDimitry Andric     AttrName = "amdgpu-no-workitem-id-z";
10820b57cec5SDimitry Andric     break;
10830b57cec5SDimitry Andric   default:
10840b57cec5SDimitry Andric     llvm_unreachable("invalid dimension");
10850b57cec5SDimitry Andric   }
10860b57cec5SDimitry Andric 
10870b57cec5SDimitry Andric   Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
10880b57cec5SDimitry Andric   CallInst *CI = Builder.CreateCall(WorkitemIdFn);
10890b57cec5SDimitry Andric   ST.makeLIDRangeMetadata(CI);
1090349cc55cSDimitry Andric   F->removeFnAttr(AttrName);
10910b57cec5SDimitry Andric 
10920b57cec5SDimitry Andric   return CI;
10930b57cec5SDimitry Andric }
10940b57cec5SDimitry Andric 
10950b57cec5SDimitry Andric static bool isCallPromotable(CallInst *CI) {
10960b57cec5SDimitry Andric   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
10970b57cec5SDimitry Andric   if (!II)
10980b57cec5SDimitry Andric     return false;
10990b57cec5SDimitry Andric 
11000b57cec5SDimitry Andric   switch (II->getIntrinsicID()) {
11010b57cec5SDimitry Andric   case Intrinsic::memcpy:
11020b57cec5SDimitry Andric   case Intrinsic::memmove:
11030b57cec5SDimitry Andric   case Intrinsic::memset:
11040b57cec5SDimitry Andric   case Intrinsic::lifetime_start:
11050b57cec5SDimitry Andric   case Intrinsic::lifetime_end:
11060b57cec5SDimitry Andric   case Intrinsic::invariant_start:
11070b57cec5SDimitry Andric   case Intrinsic::invariant_end:
11080b57cec5SDimitry Andric   case Intrinsic::launder_invariant_group:
11090b57cec5SDimitry Andric   case Intrinsic::strip_invariant_group:
11100b57cec5SDimitry Andric   case Intrinsic::objectsize:
11110b57cec5SDimitry Andric     return true;
11120b57cec5SDimitry Andric   default:
11130b57cec5SDimitry Andric     return false;
11140b57cec5SDimitry Andric   }
11150b57cec5SDimitry Andric }
11160b57cec5SDimitry Andric 
1117e8d8bef9SDimitry Andric bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
1118e8d8bef9SDimitry Andric     Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
11190b57cec5SDimitry Andric     int OpIdx1) const {
11200b57cec5SDimitry Andric   // Figure out which operand is the one we might not be promoting.
11210b57cec5SDimitry Andric   Value *OtherOp = Inst->getOperand(OpIdx0);
11220b57cec5SDimitry Andric   if (Val == OtherOp)
11230b57cec5SDimitry Andric     OtherOp = Inst->getOperand(OpIdx1);
11240b57cec5SDimitry Andric 
11250b57cec5SDimitry Andric   if (isa<ConstantPointerNull>(OtherOp))
11260b57cec5SDimitry Andric     return true;
11270b57cec5SDimitry Andric 
1128e8d8bef9SDimitry Andric   Value *OtherObj = getUnderlyingObject(OtherOp);
11290b57cec5SDimitry Andric   if (!isa<AllocaInst>(OtherObj))
11300b57cec5SDimitry Andric     return false;
11310b57cec5SDimitry Andric 
11320b57cec5SDimitry Andric   // TODO: We should be able to replace undefs with the right pointer type.
11330b57cec5SDimitry Andric 
11340b57cec5SDimitry Andric   // TODO: If we know the other base object is another promotable
11350b57cec5SDimitry Andric   // alloca, not necessarily this alloca, we can do this. The
11360b57cec5SDimitry Andric   // important part is both must have the same address space at
11370b57cec5SDimitry Andric   // the end.
11380b57cec5SDimitry Andric   if (OtherObj != BaseAlloca) {
11390b57cec5SDimitry Andric     LLVM_DEBUG(
11400b57cec5SDimitry Andric         dbgs() << "Found a binary instruction with another alloca object\n");
11410b57cec5SDimitry Andric     return false;
11420b57cec5SDimitry Andric   }
11430b57cec5SDimitry Andric 
11440b57cec5SDimitry Andric   return true;
11450b57cec5SDimitry Andric }
11460b57cec5SDimitry Andric 
1147e8d8bef9SDimitry Andric bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
1148e8d8bef9SDimitry Andric     Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
11490b57cec5SDimitry Andric 
11500b57cec5SDimitry Andric   for (User *User : Val->users()) {
11510b57cec5SDimitry Andric     if (is_contained(WorkList, User))
11520b57cec5SDimitry Andric       continue;
11530b57cec5SDimitry Andric 
11540b57cec5SDimitry Andric     if (CallInst *CI = dyn_cast<CallInst>(User)) {
11550b57cec5SDimitry Andric       if (!isCallPromotable(CI))
11560b57cec5SDimitry Andric         return false;
11570b57cec5SDimitry Andric 
11580b57cec5SDimitry Andric       WorkList.push_back(User);
11590b57cec5SDimitry Andric       continue;
11600b57cec5SDimitry Andric     }
11610b57cec5SDimitry Andric 
11620b57cec5SDimitry Andric     Instruction *UseInst = cast<Instruction>(User);
11630b57cec5SDimitry Andric     if (UseInst->getOpcode() == Instruction::PtrToInt)
11640b57cec5SDimitry Andric       return false;
11650b57cec5SDimitry Andric 
11660b57cec5SDimitry Andric     if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
11670b57cec5SDimitry Andric       if (LI->isVolatile())
11680b57cec5SDimitry Andric         return false;
11690b57cec5SDimitry Andric 
11700b57cec5SDimitry Andric       continue;
11710b57cec5SDimitry Andric     }
11720b57cec5SDimitry Andric 
11730b57cec5SDimitry Andric     if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
11740b57cec5SDimitry Andric       if (SI->isVolatile())
11750b57cec5SDimitry Andric         return false;
11760b57cec5SDimitry Andric 
11770b57cec5SDimitry Andric       // Reject if the stored value is not the pointer operand.
11780b57cec5SDimitry Andric       if (SI->getPointerOperand() != Val)
11790b57cec5SDimitry Andric         return false;
11800b57cec5SDimitry Andric     } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
11810b57cec5SDimitry Andric       if (RMW->isVolatile())
11820b57cec5SDimitry Andric         return false;
11830b57cec5SDimitry Andric     } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
11840b57cec5SDimitry Andric       if (CAS->isVolatile())
11850b57cec5SDimitry Andric         return false;
11860b57cec5SDimitry Andric     }
11870b57cec5SDimitry Andric 
11880b57cec5SDimitry Andric     // Only promote a select if we know that the other select operand
11890b57cec5SDimitry Andric     // is from another pointer that will also be promoted.
11900b57cec5SDimitry Andric     if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
11910b57cec5SDimitry Andric       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
11920b57cec5SDimitry Andric         return false;
11930b57cec5SDimitry Andric 
11940b57cec5SDimitry Andric       // May need to rewrite constant operands.
11950b57cec5SDimitry Andric       WorkList.push_back(ICmp);
11960b57cec5SDimitry Andric     }
11970b57cec5SDimitry Andric 
11980b57cec5SDimitry Andric     if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
11990b57cec5SDimitry Andric       // Give up if the pointer may be captured.
12000b57cec5SDimitry Andric       if (PointerMayBeCaptured(UseInst, true, true))
12010b57cec5SDimitry Andric         return false;
12020b57cec5SDimitry Andric       // Don't collect the users of this.
12030b57cec5SDimitry Andric       WorkList.push_back(User);
12040b57cec5SDimitry Andric       continue;
12050b57cec5SDimitry Andric     }
12060b57cec5SDimitry Andric 
1207fe6060f1SDimitry Andric     // Do not promote vector/aggregate type instructions. It is hard to track
1208fe6060f1SDimitry Andric     // their users.
1209fe6060f1SDimitry Andric     if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
1210fe6060f1SDimitry Andric       return false;
1211fe6060f1SDimitry Andric 
12120b57cec5SDimitry Andric     if (!User->getType()->isPointerTy())
12130b57cec5SDimitry Andric       continue;
12140b57cec5SDimitry Andric 
12150b57cec5SDimitry Andric     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
12160b57cec5SDimitry Andric       // Be conservative if an address could be computed outside the bounds of
12170b57cec5SDimitry Andric       // the alloca.
12180b57cec5SDimitry Andric       if (!GEP->isInBounds())
12190b57cec5SDimitry Andric         return false;
12200b57cec5SDimitry Andric     }
12210b57cec5SDimitry Andric 
12220b57cec5SDimitry Andric     // Only promote a select if we know that the other select operand is from
12230b57cec5SDimitry Andric     // another pointer that will also be promoted.
12240b57cec5SDimitry Andric     if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
12250b57cec5SDimitry Andric       if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
12260b57cec5SDimitry Andric         return false;
12270b57cec5SDimitry Andric     }
12280b57cec5SDimitry Andric 
12290b57cec5SDimitry Andric     // Repeat for phis.
12300b57cec5SDimitry Andric     if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
12310b57cec5SDimitry Andric       // TODO: Handle more complex cases. We should be able to replace loops
12320b57cec5SDimitry Andric       // over arrays.
12330b57cec5SDimitry Andric       switch (Phi->getNumIncomingValues()) {
12340b57cec5SDimitry Andric       case 1:
12350b57cec5SDimitry Andric         break;
12360b57cec5SDimitry Andric       case 2:
12370b57cec5SDimitry Andric         if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
12380b57cec5SDimitry Andric           return false;
12390b57cec5SDimitry Andric         break;
12400b57cec5SDimitry Andric       default:
12410b57cec5SDimitry Andric         return false;
12420b57cec5SDimitry Andric       }
12430b57cec5SDimitry Andric     }
12440b57cec5SDimitry Andric 
12450b57cec5SDimitry Andric     WorkList.push_back(User);
12460b57cec5SDimitry Andric     if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
12470b57cec5SDimitry Andric       return false;
12480b57cec5SDimitry Andric   }
12490b57cec5SDimitry Andric 
12500b57cec5SDimitry Andric   return true;
12510b57cec5SDimitry Andric }
12520b57cec5SDimitry Andric 
1253e8d8bef9SDimitry Andric bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
12540b57cec5SDimitry Andric 
12550b57cec5SDimitry Andric   FunctionType *FTy = F.getFunctionType();
1256e8d8bef9SDimitry Andric   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
12570b57cec5SDimitry Andric 
12580b57cec5SDimitry Andric   // If the function has any arguments in the local address space, then it's
12590b57cec5SDimitry Andric   // possible these arguments require the entire local memory space, so
12600b57cec5SDimitry Andric   // we cannot use local memory in the pass.
12610b57cec5SDimitry Andric   for (Type *ParamTy : FTy->params()) {
12620b57cec5SDimitry Andric     PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
12630b57cec5SDimitry Andric     if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
12640b57cec5SDimitry Andric       LocalMemLimit = 0;
12650b57cec5SDimitry Andric       LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
12660b57cec5SDimitry Andric                            "local memory disabled.\n");
12670b57cec5SDimitry Andric       return false;
12680b57cec5SDimitry Andric     }
12690b57cec5SDimitry Andric   }
12700b57cec5SDimitry Andric 
1271bdd1243dSDimitry Andric   LocalMemLimit = ST.getAddressableLocalMemorySize();
12720b57cec5SDimitry Andric   if (LocalMemLimit == 0)
12730b57cec5SDimitry Andric     return false;
12740b57cec5SDimitry Andric 
1275e8d8bef9SDimitry Andric   SmallVector<const Constant *, 16> Stack;
1276e8d8bef9SDimitry Andric   SmallPtrSet<const Constant *, 8> VisitedConstants;
1277e8d8bef9SDimitry Andric   SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
12780b57cec5SDimitry Andric 
1279e8d8bef9SDimitry Andric   auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
1280e8d8bef9SDimitry Andric     for (const User *U : Val->users()) {
1281e8d8bef9SDimitry Andric       if (const Instruction *Use = dyn_cast<Instruction>(U)) {
1282e8d8bef9SDimitry Andric         if (Use->getParent()->getParent() == &F)
1283e8d8bef9SDimitry Andric           return true;
1284e8d8bef9SDimitry Andric       } else {
1285e8d8bef9SDimitry Andric         const Constant *C = cast<Constant>(U);
1286e8d8bef9SDimitry Andric         if (VisitedConstants.insert(C).second)
1287e8d8bef9SDimitry Andric           Stack.push_back(C);
1288e8d8bef9SDimitry Andric       }
1289e8d8bef9SDimitry Andric     }
1290e8d8bef9SDimitry Andric 
1291e8d8bef9SDimitry Andric     return false;
1292e8d8bef9SDimitry Andric   };
1293e8d8bef9SDimitry Andric 
12940b57cec5SDimitry Andric   for (GlobalVariable &GV : Mod->globals()) {
1295480093f4SDimitry Andric     if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
12960b57cec5SDimitry Andric       continue;
12970b57cec5SDimitry Andric 
1298e8d8bef9SDimitry Andric     if (visitUsers(&GV, &GV)) {
1299e8d8bef9SDimitry Andric       UsedLDS.insert(&GV);
1300e8d8bef9SDimitry Andric       Stack.clear();
13010b57cec5SDimitry Andric       continue;
1302e8d8bef9SDimitry Andric     }
13030b57cec5SDimitry Andric 
1304e8d8bef9SDimitry Andric     // For any ConstantExpr uses, we need to recursively search the users until
1305e8d8bef9SDimitry Andric     // we see a function.
1306e8d8bef9SDimitry Andric     while (!Stack.empty()) {
1307e8d8bef9SDimitry Andric       const Constant *C = Stack.pop_back_val();
1308e8d8bef9SDimitry Andric       if (visitUsers(&GV, C)) {
1309e8d8bef9SDimitry Andric         UsedLDS.insert(&GV);
1310e8d8bef9SDimitry Andric         Stack.clear();
13110b57cec5SDimitry Andric         break;
13120b57cec5SDimitry Andric       }
13130b57cec5SDimitry Andric     }
13140b57cec5SDimitry Andric   }
13150b57cec5SDimitry Andric 
1316e8d8bef9SDimitry Andric   const DataLayout &DL = Mod->getDataLayout();
1317e8d8bef9SDimitry Andric   SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
1318e8d8bef9SDimitry Andric   AllocatedSizes.reserve(UsedLDS.size());
1319e8d8bef9SDimitry Andric 
1320e8d8bef9SDimitry Andric   for (const GlobalVariable *GV : UsedLDS) {
1321e8d8bef9SDimitry Andric     Align Alignment =
1322e8d8bef9SDimitry Andric         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
1323e8d8bef9SDimitry Andric     uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
132404eeddc0SDimitry Andric 
132504eeddc0SDimitry Andric     // HIP uses an extern unsized array in local address space for dynamically
132604eeddc0SDimitry Andric     // allocated shared memory.  In that case, we have to disable the promotion.
132704eeddc0SDimitry Andric     if (GV->hasExternalLinkage() && AllocSize == 0) {
132804eeddc0SDimitry Andric       LocalMemLimit = 0;
132904eeddc0SDimitry Andric       LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
133004eeddc0SDimitry Andric                            "local memory. Promoting to local memory "
133104eeddc0SDimitry Andric                            "disabled.\n");
133204eeddc0SDimitry Andric       return false;
133304eeddc0SDimitry Andric     }
133404eeddc0SDimitry Andric 
1335e8d8bef9SDimitry Andric     AllocatedSizes.emplace_back(AllocSize, Alignment);
1336e8d8bef9SDimitry Andric   }
1337e8d8bef9SDimitry Andric 
1338e8d8bef9SDimitry Andric   // Sort to try to estimate the worst case alignment padding
1339e8d8bef9SDimitry Andric   //
1340e8d8bef9SDimitry Andric   // FIXME: We should really do something to fix the addresses to a more optimal
1341e8d8bef9SDimitry Andric   // value instead
134281ad6265SDimitry Andric   llvm::sort(AllocatedSizes, llvm::less_second());
1343e8d8bef9SDimitry Andric 
1344e8d8bef9SDimitry Andric   // Check how much local memory is being used by global objects
1345e8d8bef9SDimitry Andric   CurrentLocalMemUsage = 0;
1346e8d8bef9SDimitry Andric 
1347e8d8bef9SDimitry Andric   // FIXME: Try to account for padding here. The real padding and address is
1348e8d8bef9SDimitry Andric   // currently determined from the inverse order of uses in the function when
1349e8d8bef9SDimitry Andric   // legalizing, which could also potentially change. We try to estimate the
1350e8d8bef9SDimitry Andric   // worst case here, but we probably should fix the addresses earlier.
1351e8d8bef9SDimitry Andric   for (auto Alloc : AllocatedSizes) {
1352e8d8bef9SDimitry Andric     CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
1353e8d8bef9SDimitry Andric     CurrentLocalMemUsage += Alloc.first;
1354e8d8bef9SDimitry Andric   }
1355e8d8bef9SDimitry Andric 
135606c3fb27SDimitry Andric   unsigned MaxOccupancy =
135706c3fb27SDimitry Andric       ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);
13580b57cec5SDimitry Andric 
13590b57cec5SDimitry Andric   // Restrict local memory usage so that we don't drastically reduce occupancy,
13600b57cec5SDimitry Andric   // unless it is already significantly reduced.
13610b57cec5SDimitry Andric 
13620b57cec5SDimitry Andric   // TODO: Have some sort of hint or other heuristics to guess occupancy based
13630b57cec5SDimitry Andric   // on other factors..
13640b57cec5SDimitry Andric   unsigned OccupancyHint = ST.getWavesPerEU(F).second;
13650b57cec5SDimitry Andric   if (OccupancyHint == 0)
13660b57cec5SDimitry Andric     OccupancyHint = 7;
13670b57cec5SDimitry Andric 
13680b57cec5SDimitry Andric   // Clamp to max value.
13690b57cec5SDimitry Andric   OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
13700b57cec5SDimitry Andric 
13710b57cec5SDimitry Andric   // Check the hint but ignore it if it's obviously wrong from the existing LDS
13720b57cec5SDimitry Andric   // usage.
13730b57cec5SDimitry Andric   MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
13740b57cec5SDimitry Andric 
13750b57cec5SDimitry Andric   // Round up to the next tier of usage.
137606c3fb27SDimitry Andric   unsigned MaxSizeWithWaveCount =
137706c3fb27SDimitry Andric       ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
13780b57cec5SDimitry Andric 
13790b57cec5SDimitry Andric   // Program is possibly broken by using more local mem than available.
13800b57cec5SDimitry Andric   if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
13810b57cec5SDimitry Andric     return false;
13820b57cec5SDimitry Andric 
13830b57cec5SDimitry Andric   LocalMemLimit = MaxSizeWithWaveCount;
13840b57cec5SDimitry Andric 
13850b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
13860b57cec5SDimitry Andric                     << " bytes of LDS\n"
13870b57cec5SDimitry Andric                     << "  Rounding size to " << MaxSizeWithWaveCount
13880b57cec5SDimitry Andric                     << " with a maximum occupancy of " << MaxOccupancy << '\n'
13890b57cec5SDimitry Andric                     << " and " << (LocalMemLimit - CurrentLocalMemUsage)
13900b57cec5SDimitry Andric                     << " available for promotion\n");
13910b57cec5SDimitry Andric 
13920b57cec5SDimitry Andric   return true;
13930b57cec5SDimitry Andric }
13940b57cec5SDimitry Andric 
13950b57cec5SDimitry Andric // FIXME: Should try to pick the most likely to be profitable allocas first.
139606c3fb27SDimitry Andric bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
139706c3fb27SDimitry Andric                                                     bool SufficientLDS) {
139806c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');
139906c3fb27SDimitry Andric 
140006c3fb27SDimitry Andric   if (DisablePromoteAllocaToLDS) {
140106c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  Promote alloca to LDS is disabled\n");
14020b57cec5SDimitry Andric     return false;
140306c3fb27SDimitry Andric   }
14040b57cec5SDimitry Andric 
14055ffd83dbSDimitry Andric   const DataLayout &DL = Mod->getDataLayout();
14060b57cec5SDimitry Andric   IRBuilder<> Builder(&I);
14070b57cec5SDimitry Andric 
14080b57cec5SDimitry Andric   const Function &ContainingFunction = *I.getParent()->getParent();
14090b57cec5SDimitry Andric   CallingConv::ID CC = ContainingFunction.getCallingConv();
14100b57cec5SDimitry Andric 
14110b57cec5SDimitry Andric   // Don't promote the alloca to LDS for shader calling conventions as the work
14120b57cec5SDimitry Andric   // item ID intrinsics are not supported for these calling conventions.
14130b57cec5SDimitry Andric   // Furthermore not all LDS is available for some of the stages.
14140b57cec5SDimitry Andric   switch (CC) {
14150b57cec5SDimitry Andric   case CallingConv::AMDGPU_KERNEL:
14160b57cec5SDimitry Andric   case CallingConv::SPIR_KERNEL:
14170b57cec5SDimitry Andric     break;
14180b57cec5SDimitry Andric   default:
14190b57cec5SDimitry Andric     LLVM_DEBUG(
14200b57cec5SDimitry Andric         dbgs()
14210b57cec5SDimitry Andric         << " promote alloca to LDS not supported with calling convention.\n");
14220b57cec5SDimitry Andric     return false;
14230b57cec5SDimitry Andric   }
14240b57cec5SDimitry Andric 
14250b57cec5SDimitry Andric   // Not likely to have sufficient local memory for promotion.
14260b57cec5SDimitry Andric   if (!SufficientLDS)
14270b57cec5SDimitry Andric     return false;
14280b57cec5SDimitry Andric 
1429e8d8bef9SDimitry Andric   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
14300b57cec5SDimitry Andric   unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
14310b57cec5SDimitry Andric 
14325ffd83dbSDimitry Andric   Align Alignment =
14335ffd83dbSDimitry Andric       DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
14340b57cec5SDimitry Andric 
14350b57cec5SDimitry Andric   // FIXME: This computed padding is likely wrong since it depends on inverse
14360b57cec5SDimitry Andric   // usage order.
14370b57cec5SDimitry Andric   //
14380b57cec5SDimitry Andric   // FIXME: It is also possible that if we're allowed to use all of the memory
143981ad6265SDimitry Andric   // could end up using more than the maximum due to alignment padding.
14400b57cec5SDimitry Andric 
14415ffd83dbSDimitry Andric   uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
144206c3fb27SDimitry Andric   uint32_t AllocSize =
144306c3fb27SDimitry Andric       WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
14440b57cec5SDimitry Andric   NewSize += AllocSize;
14450b57cec5SDimitry Andric 
14460b57cec5SDimitry Andric   if (NewSize > LocalMemLimit) {
14470b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "  " << AllocSize
14480b57cec5SDimitry Andric                       << " bytes of local memory not available to promote\n");
14490b57cec5SDimitry Andric     return false;
14500b57cec5SDimitry Andric   }
14510b57cec5SDimitry Andric 
14520b57cec5SDimitry Andric   CurrentLocalMemUsage = NewSize;
14530b57cec5SDimitry Andric 
14540b57cec5SDimitry Andric   std::vector<Value *> WorkList;
14550b57cec5SDimitry Andric 
14560b57cec5SDimitry Andric   if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
14570b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
14580b57cec5SDimitry Andric     return false;
14590b57cec5SDimitry Andric   }
14600b57cec5SDimitry Andric 
14610b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
14620b57cec5SDimitry Andric 
14630b57cec5SDimitry Andric   Function *F = I.getParent()->getParent();
14640b57cec5SDimitry Andric 
14650b57cec5SDimitry Andric   Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
14660b57cec5SDimitry Andric   GlobalVariable *GV = new GlobalVariable(
1467bdd1243dSDimitry Andric       *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
1468bdd1243dSDimitry Andric       Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
1469bdd1243dSDimitry Andric       GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
14700b57cec5SDimitry Andric   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
14710eae32dcSDimitry Andric   GV->setAlignment(I.getAlign());
14720b57cec5SDimitry Andric 
14730b57cec5SDimitry Andric   Value *TCntY, *TCntZ;
14740b57cec5SDimitry Andric 
14750b57cec5SDimitry Andric   std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
14760b57cec5SDimitry Andric   Value *TIdX = getWorkitemID(Builder, 0);
14770b57cec5SDimitry Andric   Value *TIdY = getWorkitemID(Builder, 1);
14780b57cec5SDimitry Andric   Value *TIdZ = getWorkitemID(Builder, 2);
14790b57cec5SDimitry Andric 
14800b57cec5SDimitry Andric   Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
14810b57cec5SDimitry Andric   Tmp0 = Builder.CreateMul(Tmp0, TIdX);
14820b57cec5SDimitry Andric   Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
14830b57cec5SDimitry Andric   Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
14840b57cec5SDimitry Andric   TID = Builder.CreateAdd(TID, TIdZ);
14850b57cec5SDimitry Andric 
148606c3fb27SDimitry Andric   LLVMContext &Context = Mod->getContext();
148706c3fb27SDimitry Andric   Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};
14880b57cec5SDimitry Andric 
14890b57cec5SDimitry Andric   Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
14900b57cec5SDimitry Andric   I.mutateType(Offset->getType());
14910b57cec5SDimitry Andric   I.replaceAllUsesWith(Offset);
14920b57cec5SDimitry Andric   I.eraseFromParent();
14930b57cec5SDimitry Andric 
1494fe6060f1SDimitry Andric   SmallVector<IntrinsicInst *> DeferredIntrs;
1495fe6060f1SDimitry Andric 
14960b57cec5SDimitry Andric   for (Value *V : WorkList) {
14970b57cec5SDimitry Andric     CallInst *Call = dyn_cast<CallInst>(V);
14980b57cec5SDimitry Andric     if (!Call) {
14990b57cec5SDimitry Andric       if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
150006c3fb27SDimitry Andric         PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
15010b57cec5SDimitry Andric 
15020b57cec5SDimitry Andric         if (isa<ConstantPointerNull>(CI->getOperand(0)))
15030b57cec5SDimitry Andric           CI->setOperand(0, ConstantPointerNull::get(NewTy));
15040b57cec5SDimitry Andric 
15050b57cec5SDimitry Andric         if (isa<ConstantPointerNull>(CI->getOperand(1)))
15060b57cec5SDimitry Andric           CI->setOperand(1, ConstantPointerNull::get(NewTy));
15070b57cec5SDimitry Andric 
15080b57cec5SDimitry Andric         continue;
15090b57cec5SDimitry Andric       }
15100b57cec5SDimitry Andric 
15110b57cec5SDimitry Andric       // The operand's value should be corrected on its own and we don't want to
15120b57cec5SDimitry Andric       // touch the users.
15130b57cec5SDimitry Andric       if (isa<AddrSpaceCastInst>(V))
15140b57cec5SDimitry Andric         continue;
15150b57cec5SDimitry Andric 
151606c3fb27SDimitry Andric       PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
15170b57cec5SDimitry Andric 
15180b57cec5SDimitry Andric       // FIXME: It doesn't really make sense to try to do this for all
15190b57cec5SDimitry Andric       // instructions.
15200b57cec5SDimitry Andric       V->mutateType(NewTy);
15210b57cec5SDimitry Andric 
15220b57cec5SDimitry Andric       // Adjust the types of any constant operands.
15230b57cec5SDimitry Andric       if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
15240b57cec5SDimitry Andric         if (isa<ConstantPointerNull>(SI->getOperand(1)))
15250b57cec5SDimitry Andric           SI->setOperand(1, ConstantPointerNull::get(NewTy));
15260b57cec5SDimitry Andric 
15270b57cec5SDimitry Andric         if (isa<ConstantPointerNull>(SI->getOperand(2)))
15280b57cec5SDimitry Andric           SI->setOperand(2, ConstantPointerNull::get(NewTy));
15290b57cec5SDimitry Andric       } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
15300b57cec5SDimitry Andric         for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
15310b57cec5SDimitry Andric           if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
15320b57cec5SDimitry Andric             Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
15330b57cec5SDimitry Andric         }
15340b57cec5SDimitry Andric       }
15350b57cec5SDimitry Andric 
15360b57cec5SDimitry Andric       continue;
15370b57cec5SDimitry Andric     }
15380b57cec5SDimitry Andric 
15390b57cec5SDimitry Andric     IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
15400b57cec5SDimitry Andric     Builder.SetInsertPoint(Intr);
15410b57cec5SDimitry Andric     switch (Intr->getIntrinsicID()) {
15420b57cec5SDimitry Andric     case Intrinsic::lifetime_start:
15430b57cec5SDimitry Andric     case Intrinsic::lifetime_end:
15440b57cec5SDimitry Andric       // These intrinsics are for address space 0 only
15450b57cec5SDimitry Andric       Intr->eraseFromParent();
15460b57cec5SDimitry Andric       continue;
1547fe6060f1SDimitry Andric     case Intrinsic::memcpy:
1548fe6060f1SDimitry Andric     case Intrinsic::memmove:
1549fe6060f1SDimitry Andric       // These have 2 pointer operands. In case if second pointer also needs
1550fe6060f1SDimitry Andric       // to be replaced we defer processing of these intrinsics until all
1551fe6060f1SDimitry Andric       // other values are processed.
1552fe6060f1SDimitry Andric       DeferredIntrs.push_back(Intr);
15530b57cec5SDimitry Andric       continue;
15540b57cec5SDimitry Andric     case Intrinsic::memset: {
15550b57cec5SDimitry Andric       MemSetInst *MemSet = cast<MemSetInst>(Intr);
1556bdd1243dSDimitry Andric       Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
1557bdd1243dSDimitry Andric                            MemSet->getLength(), MemSet->getDestAlign(),
1558bdd1243dSDimitry Andric                            MemSet->isVolatile());
15590b57cec5SDimitry Andric       Intr->eraseFromParent();
15600b57cec5SDimitry Andric       continue;
15610b57cec5SDimitry Andric     }
15620b57cec5SDimitry Andric     case Intrinsic::invariant_start:
15630b57cec5SDimitry Andric     case Intrinsic::invariant_end:
15640b57cec5SDimitry Andric     case Intrinsic::launder_invariant_group:
15650b57cec5SDimitry Andric     case Intrinsic::strip_invariant_group:
15660b57cec5SDimitry Andric       Intr->eraseFromParent();
15670b57cec5SDimitry Andric       // FIXME: I think the invariant marker should still theoretically apply,
15680b57cec5SDimitry Andric       // but the intrinsics need to be changed to accept pointers with any
15690b57cec5SDimitry Andric       // address space.
15700b57cec5SDimitry Andric       continue;
15710b57cec5SDimitry Andric     case Intrinsic::objectsize: {
15720b57cec5SDimitry Andric       Value *Src = Intr->getOperand(0);
1573fe6060f1SDimitry Andric       Function *ObjectSize = Intrinsic::getDeclaration(
1574fe6060f1SDimitry Andric           Mod, Intrinsic::objectsize,
1575fe6060f1SDimitry Andric           {Intr->getType(),
157606c3fb27SDimitry Andric            PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});
15770b57cec5SDimitry Andric 
15780b57cec5SDimitry Andric       CallInst *NewCall = Builder.CreateCall(
15790b57cec5SDimitry Andric           ObjectSize,
15800b57cec5SDimitry Andric           {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
15810b57cec5SDimitry Andric       Intr->replaceAllUsesWith(NewCall);
15820b57cec5SDimitry Andric       Intr->eraseFromParent();
15830b57cec5SDimitry Andric       continue;
15840b57cec5SDimitry Andric     }
15850b57cec5SDimitry Andric     default:
15860b57cec5SDimitry Andric       Intr->print(errs());
15870b57cec5SDimitry Andric       llvm_unreachable("Don't know how to promote alloca intrinsic use.");
15880b57cec5SDimitry Andric     }
15890b57cec5SDimitry Andric   }
1590fe6060f1SDimitry Andric 
1591fe6060f1SDimitry Andric   for (IntrinsicInst *Intr : DeferredIntrs) {
1592fe6060f1SDimitry Andric     Builder.SetInsertPoint(Intr);
1593fe6060f1SDimitry Andric     Intrinsic::ID ID = Intr->getIntrinsicID();
1594fe6060f1SDimitry Andric     assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1595fe6060f1SDimitry Andric 
1596fe6060f1SDimitry Andric     MemTransferInst *MI = cast<MemTransferInst>(Intr);
159706c3fb27SDimitry Andric     auto *B = Builder.CreateMemTransferInst(
159806c3fb27SDimitry Andric         ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
159906c3fb27SDimitry Andric         MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
1600fe6060f1SDimitry Andric 
1601349cc55cSDimitry Andric     for (unsigned I = 0; I != 2; ++I) {
1602349cc55cSDimitry Andric       if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1603349cc55cSDimitry Andric         B->addDereferenceableParamAttr(I, Bytes);
1604fe6060f1SDimitry Andric       }
1605fe6060f1SDimitry Andric     }
1606fe6060f1SDimitry Andric 
1607fe6060f1SDimitry Andric     Intr->eraseFromParent();
1608fe6060f1SDimitry Andric   }
1609fe6060f1SDimitry Andric 
16100b57cec5SDimitry Andric   return true;
16110b57cec5SDimitry Andric }
1612