xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp (revision 11b040192640ef3b1f481124c440f464ed6ec86a)
1 //===----------------------- SIFrameLowering.cpp --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //==-----------------------------------------------------------------------===//
8 
9 #include "SIFrameLowering.h"
10 #include "AMDGPU.h"
11 #include "GCNSubtarget.h"
12 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
13 #include "SIMachineFunctionInfo.h"
14 #include "llvm/CodeGen/LiveRegUnits.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/RegisterScavenging.h"
17 #include "llvm/Target/TargetMachine.h"
18 
19 using namespace llvm;
20 
21 #define DEBUG_TYPE "frame-info"
22 
// Hidden command-line override controlling whether VGPR spills may be placed
// in AGPRs instead of scratch memory. Enabled by default; the flag is
// consumed by the spilling logic elsewhere in this file.
static cl::opt<bool> EnableSpillVGPRToAGPR(
  "amdgpu-spill-vgpr-to-agpr",
  cl::desc("Enable spilling VGPRs to AGPRs"),
  cl::ReallyHidden,
  cl::init(true));
29 // Find a register matching \p RC from \p LiveUnits which is unused and
30 // available throughout the function. On failure, returns AMDGPU::NoRegister.
31 // TODO: Rewrite the loop here to iterate over MCRegUnits instead of
32 // MCRegisters. This should reduce the number of iterations and avoid redundant
33 // checking.
34 static MCRegister findUnusedRegister(MachineRegisterInfo &MRI,
35                                      const LiveRegUnits &LiveUnits,
36                                      const TargetRegisterClass &RC) {
37   for (MCRegister Reg : RC) {
38     if (!MRI.isPhysRegUsed(Reg) && LiveUnits.available(Reg) &&
39         !MRI.isReserved(Reg))
40       return Reg;
41   }
42   return MCRegister();
43 }
44 
45 // Find a scratch register that we can use in the prologue. We avoid using
46 // callee-save registers since they may appear to be free when this is called
47 // from canUseAsPrologue (during shrink wrapping), but then no longer be free
48 // when this is called from emitPrologue.
49 static MCRegister findScratchNonCalleeSaveRegister(
50     MachineRegisterInfo &MRI, LiveRegUnits &LiveUnits,
51     const TargetRegisterClass &RC, bool Unused = false) {
52   // Mark callee saved registers as used so we will not choose them.
53   const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
54   for (unsigned i = 0; CSRegs[i]; ++i)
55     LiveUnits.addReg(CSRegs[i]);
56 
57   // We are looking for a register that can be used throughout the entire
58   // function, so any use is unacceptable.
59   if (Unused)
60     return findUnusedRegister(MRI, LiveUnits, RC);
61 
62   for (MCRegister Reg : RC) {
63     if (LiveUnits.available(Reg) && !MRI.isReserved(Reg))
64       return Reg;
65   }
66 
67   return MCRegister();
68 }
69 
/// Query target location for spilling SGPRs
/// \p IncludeScratchCopy : Also look for free scratch SGPRs
///
/// Decides how the special SGPR \p SGPR (e.g. FP/BP) will be saved around the
/// prolog/epilog and records the decision in SIMachineFunctionInfo. Three
/// strategies are tried, in order of preference:
///   1. copy into a free scratch SGPR (cheapest),
///   2. spill into a lane of a physical VGPR,
///   3. spill to scratch memory (last resort).
static void getVGPRSpillLaneOrTempRegister(
    MachineFunction &MF, LiveRegUnits &LiveUnits, Register SGPR,
    const TargetRegisterClass &RC = AMDGPU::SReg_32_XM0_XEXECRegClass,
    bool IncludeScratchCopy = true) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);

  // We need to save and restore the given SGPR.

  Register ScratchSGPR;
  // 1: Try to save the given register into an unused scratch SGPR. The
  // LiveUnits should have all the callee saved registers marked as used. For
  // certain cases we skip copy to scratch SGPR.
  if (IncludeScratchCopy)
    ScratchSGPR = findUnusedRegister(MF.getRegInfo(), LiveUnits, RC);

  if (!ScratchSGPR) {
    // Create an SGPR-spill stack object; if a VGPR lane can be allocated for
    // it, the FI is kept, otherwise it is replaced by a real spill slot below.
    int FI = FrameInfo.CreateStackObject(Size, Alignment, true, nullptr,
                                         TargetStackID::SGPRSpill);

    if (TRI->spillSGPRToVGPR() &&
        MFI->allocateSGPRSpillToVGPRLane(MF, FI, /*SpillToPhysVGPRLane=*/true,
                                         /*IsPrologEpilog=*/true)) {
      // 2: There's no free lane to spill, and no free register to save the
      // SGPR, so we're forced to take another VGPR to use for the spill.
      MFI->addToPrologEpilogSGPRSpills(
          SGPR, PrologEpilogSGPRSaveRestoreInfo(
                    SGPRSaveKind::SPILL_TO_VGPR_LANE, FI));

      LLVM_DEBUG(auto Spill = MFI->getSGPRSpillToPhysicalVGPRLanes(FI).front();
                 dbgs() << printReg(SGPR, TRI) << " requires fallback spill to "
                        << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
                        << '\n';);
    } else {
      // Remove dead <FI> index
      MF.getFrameInfo().RemoveStackObject(FI);
      // 3: If all else fails, spill the register to memory.
      FI = FrameInfo.CreateSpillStackObject(Size, Alignment);
      MFI->addToPrologEpilogSGPRSpills(
          SGPR,
          PrologEpilogSGPRSaveRestoreInfo(SGPRSaveKind::SPILL_TO_MEM, FI));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling "
                        << printReg(SGPR, TRI) << '\n');
    }
  } else {
    // Found a free scratch SGPR: record a register-to-register save and mark
    // it live so subsequent queries do not hand out the same register again.
    MFI->addToPrologEpilogSGPRSpills(
        SGPR, PrologEpilogSGPRSaveRestoreInfo(
                  SGPRSaveKind::COPY_TO_SCRATCH_SGPR, ScratchSGPR));
    LiveUnits.addReg(ScratchSGPR);
    LLVM_DEBUG(dbgs() << "Saving " << printReg(SGPR, TRI) << " with copy to "
                      << printReg(ScratchSGPR, TRI) << '\n');
  }
}
130 
// We need to specially emit stack operations here because a different frame
// register is used than in the rest of the function, as getFrameRegister would
// use.
//
// Stores \p SpillReg into stack slot \p FI addressed through \p FrameReg
// (plus \p DwordOff bytes), using a flat-scratch store when the subtarget
// supports it, otherwise a buffer store.
static void buildPrologSpill(const GCNSubtarget &ST, const SIRegisterInfo &TRI,
                             const SIMachineFunctionInfo &FuncInfo,
                             LiveRegUnits &LiveUnits, MachineFunction &MF,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I, const DebugLoc &DL,
                             Register SpillReg, int FI, Register FrameReg,
                             int64_t DwordOff = 0) {
  unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
                                        : AMDGPU::BUFFER_STORE_DWORD_OFFSET;

  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FI),
      FrameInfo.getObjectAlign(FI));
  // Mark SpillReg live across the store so buildSpillLoadStore (which is
  // handed LiveUnits) sees it as occupied; if the register dies here we take
  // it back out afterwards.
  LiveUnits.addReg(SpillReg);
  // If SpillReg is not live into this block, this store is its last use.
  bool IsKill = !MBB.isLiveIn(SpillReg);
  TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, IsKill, FrameReg,
                          DwordOff, MMO, nullptr, &LiveUnits);
  if (IsKill)
    LiveUnits.removeReg(SpillReg);
}
156 
157 static void buildEpilogRestore(const GCNSubtarget &ST,
158                                const SIRegisterInfo &TRI,
159                                const SIMachineFunctionInfo &FuncInfo,
160                                LiveRegUnits &LiveUnits, MachineFunction &MF,
161                                MachineBasicBlock &MBB,
162                                MachineBasicBlock::iterator I,
163                                const DebugLoc &DL, Register SpillReg, int FI,
164                                Register FrameReg, int64_t DwordOff = 0) {
165   unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
166                                         : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
167 
168   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
169   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
170   MachineMemOperand *MMO = MF.getMachineMemOperand(
171       PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FI),
172       FrameInfo.getObjectAlign(FI));
173   TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, false, FrameReg,
174                           DwordOff, MMO, nullptr, &LiveUnits);
175 }
176 
// Materialize the 64-bit GIT (global information table) pointer into
// \p TargetReg. The high half comes from the amdgpu-git-ptr-high value when
// one was provided; otherwise the whole pair is seeded from the current PC
// via S_GETPC_B64. The low half is then copied from the GIT-pointer-low
// argument register.
static void buildGitPtr(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, const SIInstrInfo *TII,
                        Register TargetReg) {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
  Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
  Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);

  // 0xffffffff is the sentinel for "no amdgpu-git-ptr-high attribute".
  if (MFI->getGITPtrHigh() != 0xffffffff) {
    // The implicit-def of the full pair keeps liveness of TargetReg intact
    // even though only the high sub-register is written here.
    BuildMI(MBB, I, DL, SMovB32, TargetHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(TargetReg, RegState::ImplicitDefine);
  } else {
    const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64_pseudo);
    BuildMI(MBB, I, DL, GetPC64, TargetReg);
  }
  // The low half arrives in a preloaded SGPR; record it as a live-in before
  // copying it into place.
  Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
  MF->getRegInfo().addLiveIn(GitPtrLo);
  MBB.addLiveIn(GitPtrLo);
  BuildMI(MBB, I, DL, SMovB32, TargetLo)
    .addReg(GitPtrLo);
}
201 
202 static void initLiveUnits(LiveRegUnits &LiveUnits, const SIRegisterInfo &TRI,
203                           const SIMachineFunctionInfo *FuncInfo,
204                           MachineFunction &MF, MachineBasicBlock &MBB,
205                           MachineBasicBlock::iterator MBBI, bool IsProlog) {
206   if (LiveUnits.empty()) {
207     LiveUnits.init(TRI);
208     if (IsProlog) {
209       LiveUnits.addLiveIns(MBB);
210     } else {
211       // In epilog.
212       LiveUnits.addLiveOuts(MBB);
213       LiveUnits.stepBackward(*MBBI);
214     }
215   }
216 }
217 
218 namespace llvm {
219 
220 // SpillBuilder to save/restore special SGPR spills like the one needed for FP,
221 // BP, etc. These spills are delayed until the current function's frame is
222 // finalized. For a given register, the builder uses the
223 // PrologEpilogSGPRSaveRestoreInfo to decide the spill method.
class PrologEpilogSGPRSpillBuilder {
  MachineBasicBlock::iterator MI; // Insertion point for emitted instructions.
  MachineBasicBlock &MBB;
  MachineFunction &MF;
  const GCNSubtarget &ST;
  MachineFrameInfo &MFI;
  SIMachineFunctionInfo *FuncInfo;
  const SIInstrInfo *TII;
  const SIRegisterInfo &TRI;
  Register SuperReg; // The SGPR (possibly multi-dword) being saved/restored.
  const PrologEpilogSGPRSaveRestoreInfo SI; // Chosen save/restore strategy.
  LiveRegUnits &LiveUnits;
  const DebugLoc &DL;
  Register FrameReg;
  ArrayRef<int16_t> SplitParts; // Sub-register indices of SuperReg's dwords.
  unsigned NumSubRegs;          // 1 if SuperReg is a single 32-bit register.
  unsigned EltSize = 4;         // Each spilled element is one dword.

  // Save SuperReg to the scratch stack slot FI, one dword at a time, by
  // copying each sub-register through a temporary VGPR.
  void saveToMemory(const int FI) const {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    assert(!MFI.isDeadObjectIndex(FI));

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ true);

    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      // SGPR -> VGPR, then VGPR -> memory.
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
          .addReg(SubReg);

      buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR,
                       FI, FrameReg, DwordOff);
      DwordOff += 4;
    }
  }

  // Save SuperReg into the physical VGPR lanes previously allocated for
  // stack slot FI.
  void saveToVGPRLane(const int FI) const {
    assert(!MFI.isDeadObjectIndex(FI));

    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      // The undef use of the destination VGPR avoids a false dependence on
      // its previous contents.
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_S32_TO_VGPR),
              Spill[I].VGPR)
          .addReg(SubReg)
          .addImm(Spill[I].Lane)
          .addReg(Spill[I].VGPR, RegState::Undef);
    }
  }

  // Save SuperReg with a plain register copy into a free scratch SGPR.
  void copyToScratchSGPR(Register DstReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(SuperReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Restore SuperReg from the scratch stack slot FI: load each dword into a
  // temporary VGPR and read it back into the SGPR sub-register.
  void restoreFromMemory(const int FI) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ false);
    MCPhysReg TmpVGPR = findScratchNonCalleeSaveRegister(
        MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
    if (!TmpVGPR)
      report_fatal_error("failed to find free scratch register");

    for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));

      buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL,
                         TmpVGPR, FI, FrameReg, DwordOff);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
          .addReg(TmpVGPR, RegState::Kill);
      DwordOff += 4;
    }
  }

  // Restore SuperReg from the physical VGPR lanes allocated for slot FI.
  void restoreFromVGPRLane(const int FI) {
    assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
        FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
    assert(Spill.size() == NumSubRegs);

    for (unsigned I = 0; I < NumSubRegs; ++I) {
      Register SubReg = NumSubRegs == 1
                            ? SuperReg
                            : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_RESTORE_S32_FROM_VGPR), SubReg)
          .addReg(Spill[I].VGPR)
          .addImm(Spill[I].Lane);
    }
  }

  // Restore SuperReg with a plain register copy from the scratch SGPR it was
  // saved to.
  void copyFromScratchSGPR(Register SrcReg) const {
    BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), SuperReg)
        .addReg(SrcReg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }

public:
  // \p Reg is the SGPR to save/restore; \p SI carries the strategy chosen by
  // getVGPRSpillLaneOrTempRegister. Instructions are inserted before \p MI.
  PrologEpilogSGPRSpillBuilder(Register Reg,
                               const PrologEpilogSGPRSaveRestoreInfo SI,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, const SIInstrInfo *TII,
                               const SIRegisterInfo &TRI,
                               LiveRegUnits &LiveUnits, Register FrameReg)
      : MI(MI), MBB(MBB), MF(*MBB.getParent()),
        ST(MF.getSubtarget<GCNSubtarget>()), MFI(MF.getFrameInfo()),
        FuncInfo(MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
        SuperReg(Reg), SI(SI), LiveUnits(LiveUnits), DL(DL),
        FrameReg(FrameReg) {
    // Decompose multi-dword registers into their 32-bit sub-registers.
    const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg);
    SplitParts = TRI.getRegSplitParts(RC, EltSize);
    NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

    assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
  }

  // Emit the prolog-side save according to the recorded strategy.
  void save() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return saveToMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return saveToVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyToScratchSGPR(SI.getReg());
    }
  }

  // Emit the epilog-side restore, mirroring save().
  void restore() {
    switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
      return restoreFromMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
      return restoreFromVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
      return copyFromScratchSGPR(SI.getReg());
    }
  }
};
378 
379 } // namespace llvm
380 
// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
//
// Combines the preloaded flat-scratch init value (base/size pair) with the
// per-wave scratch offset and installs the result into FLAT_SCR (or its
// hwreg equivalents on GFX10+). On PAL the init value is first loaded from
// the GIT-provided scratch descriptor.
void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  Register FlatScrInitLo;
  Register FlatScrInitHi;

  if (ST.isAmdPalOS()) {
    // Extract the scratch offset from the descriptor in the GIT
    LiveRegUnits LiveUnits;
    LiveUnits.init(*TRI);
    LiveUnits.addLiveIns(MBB);

    // Find unused reg to load flat scratch init into
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register FlatScrInit = AMDGPU::NoRegister;
    ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
    // Skip over the SGPR pairs occupied by preloaded arguments.
    unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
    AllSGPR64s = AllSGPR64s.slice(
        std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPR64s) {
      // Must be free, allocatable, and must not alias the GIT pointer low
      // register which buildGitPtr is about to read.
      if (LiveUnits.available(Reg) && !MRI.isReserved(Reg) &&
          MRI.isAllocatable(Reg) && !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
        FlatScrInit = Reg;
        break;
      }
    }
    assert(FlatScrInit && "Failed to find free register for scratch init");

    FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);

    buildGitPtr(MBB, I, DL, TII, FlatScrInit);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
    const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
    auto *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
            MachineMemOperand::MODereferenceable,
        8, Align(4));
    unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
        .addReg(FlatScrInit)
        .addImm(EncodedOffset) // offset
        .addImm(0)             // cpol
        .addMemOperand(MMO);

    // Mask the offset in [47:0] of the descriptor
    const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
    auto And = BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0xffff);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
  } else {
    // Non-PAL: the flat scratch init pair arrives as a preloaded argument.
    Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
    assert(FlatScratchInitReg);

    MachineRegisterInfo &MRI = MF.getRegInfo();
    MRI.addLiveIn(FlatScratchInitReg);
    MBB.addLiveIn(FlatScratchInitReg);

    FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
    FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  }

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
      auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                          FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0);
      Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

      // On GFX10+ FLAT_SCR is not directly addressable; write the halves
      // via S_SETREG instead.
      using namespace AMDGPU::Hwreg;
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitLo)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_LO, 0, 32)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
          .addReg(FlatScrInitHi)
          .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_HI, 0, 32)));
      return;
    }

    // For GFX9.
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
                        AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);
    Addc->getOperand(3).setIsDead(); // Mark SCC as dead.

    return;
  }

  // Pre-GFX9: FLAT_SCR holds a size/offset pair rather than a pointer.
  assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), FlatScrInitLo)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  auto LShr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32),
                      AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
  LShr->getOperand(3).setIsDead(); // Mark SCC as dead.
}
523 
524 // Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
525 // memory. They should have been removed by now.
526 static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
527   for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
528        I != E; ++I) {
529     if (!MFI.isDeadObjectIndex(I))
530       return false;
531   }
532 
533   return true;
534 }
535 
// Shift down registers reserved for the scratch RSRC.
//
// Returns the (possibly relocated) scratch resource register for an entry
// function, or Register() when the SRSRC is provably unused. When the
// reserved SRSRC sits at the conservatively-chosen top-of-file location, it
// is moved down to the first free SGPR128 past the preloaded arguments, and
// all existing references are rewritten to the new register.
Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
    MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  assert(MFI->isEntryFunction());

  Register ScratchRsrcReg = MFI->getScratchRSrcReg();

  // Nothing to do when no SRSRC was assigned, or when it has no uses and the
  // frame has no live stack objects.
  if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
    return Register();

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We only
  // cannot do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // Round the preloaded SGPR count up to whole SGPR128 quads.
  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
  AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed. Also for PAL, make sure we don't clobber
    // the GIT pointer passed in SGPR0 or SGPR8.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
        (!GITPtrLoReg || !TRI->isSubRegisterEq(Reg, GITPtrLoReg))) {
      // Rewrite all uses of the old SRSRC to the relocated register and
      // record/reserve the new choice.
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      MRI.reserveReg(Reg, TRI);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}
589 
590 static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
591   return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
592 }
593 
// Emit the prologue for an entry function (kernel / shader entry point):
// pick and set up the scratch resource register, resolve the scratch wave
// offset, materialize FP/SP, and initialize flat scratch when needed.
void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  // FIXME: If we only have SGPR spills, we won't actually be using scratch
  // memory since these spill to VGPRs. We should be cleaning up these unused
  // SGPR spill frame indices somewhere.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  // FIXME: Remove all of the isPhysRegUsed checks

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  assert(MFI->isEntryFunction());

  Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  // We need to do the replacement of the private segment buffer register even
  // if there are no stack objects. There could be stores to undef or a
  // constant without an associated object.
  //
  // This will return `Register()` in cases where there are no actual
  // uses of the SRSRC.
  Register ScratchRsrcReg;
  if (!ST.enableFlatScratch())
    ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);

  // Make the selected register live throughout the function.
  if (ScratchRsrcReg) {
    for (MachineBasicBlock &OtherBB : MF) {
      if (&OtherBB != &MBB) {
        OtherBB.addLiveIn(ScratchRsrcReg);
      }
    }
  }

  // Now that we have fixed the reserved SRSRC we need to locate the
  // (potentially) preloaded SRSRC.
  Register PreloadedScratchRsrcReg;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
    if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
      // We added live-ins during argument lowering, but since they were not
      // used they were deleted. We're adding the uses now, so add them back.
      MRI.addLiveIn(PreloadedScratchRsrcReg);
      MBB.addLiveIn(PreloadedScratchRsrcReg);
    }
  }

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // We found the SRSRC first because it needs four registers and has an
  // alignment requirement. If the SRSRC that we found is clobbering with
  // the scratch wave offset, which may be in a fixed SGPR or a free SGPR
  // chosen by SITargetLowering::allocateSystemSGPRs, COPY the scratch
  // wave offset to a free SGPR.
  Register ScratchWaveOffsetReg;
  if (PreloadedScratchWaveOffsetReg &&
      TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
    // Search the SGPRs past the preloaded arguments for a free home for the
    // wave offset.
    ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
    unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
    AllSGPRs = AllSGPRs.slice(
        std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
    Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
    for (MCPhysReg Reg : AllSGPRs) {
      if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
          !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
        ScratchWaveOffsetReg = Reg;
        BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
            .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
        break;
      }
    }

    // FIXME: We can spill incoming arguments and restore at the end of the
    // prolog.
    if (!ScratchWaveOffsetReg)
      report_fatal_error(
          "could not find temporary scratch offset register in prolog");
  } else {
    ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
  }
  assert(ScratchWaveOffsetReg || !PreloadedScratchWaveOffsetReg);

  // Entry function frames start at offset 0, so FP is simply zeroed.
  if (hasFP(MF)) {
    Register FPReg = MFI->getFrameOffsetReg();
    assert(FPReg != AMDGPU::FP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
  }

  // SP starts at the frame size, scaled to per-wave units when buffer
  // scratch (rather than flat scratch) is in use.
  if (requiresStackPointerReference(MF)) {
    Register SPReg = MFI->getStackPtrOffsetReg();
    assert(SPReg != AMDGPU::SP_REG);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
        .addImm(FrameInfo.getStackSize() * getScratchScaleFactor(ST));
  }

  // Flat scratch needs initializing if FLAT_SCR is used directly, the
  // function makes calls, or flat-scratch targets have live stack objects.
  bool NeedsFlatScratchInit =
      MFI->getUserSGPRInfo().hasFlatScratchInit() &&
      (MRI.isPhysRegUsed(AMDGPU::FLAT_SCR) || FrameInfo.hasCalls() ||
       (!allStackObjectsAreDead(FrameInfo) && ST.enableFlatScratch()));

  if ((NeedsFlatScratchInit || ScratchRsrcReg) &&
      PreloadedScratchWaveOffsetReg && !ST.flatScratchIsArchitected()) {
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (NeedsFlatScratchInit) {
    emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
  }

  if (ScratchRsrcReg) {
    emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
                                         PreloadedScratchRsrcReg,
                                         ScratchRsrcReg, ScratchWaveOffsetReg);
  }
}
727 
728 // Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
729 void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
730     MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
731     const DebugLoc &DL, Register PreloadedScratchRsrcReg,
732     Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {
733 
734   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
735   const SIInstrInfo *TII = ST.getInstrInfo();
736   const SIRegisterInfo *TRI = &TII->getRegisterInfo();
737   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
738   const Function &Fn = MF.getFunction();
739 
740   if (ST.isAmdPalOS()) {
741     // The pointer to the GIT is formed from the offset passed in and either
742     // the amdgpu-git-ptr-high function attribute or the top part of the PC
743     Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
744     Register Rsrc03 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
745 
746     buildGitPtr(MBB, I, DL, TII, Rsrc01);
747 
748     // We now have the GIT ptr - now get the scratch descriptor from the entry
749     // at offset 0 (or offset 16 for a compute shader).
750     MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
751     const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
752     auto *MMO = MF.getMachineMemOperand(
753         PtrInfo,
754         MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
755             MachineMemOperand::MODereferenceable,
756         16, Align(4));
757     unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
758     const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
759     unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
760     BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
761       .addReg(Rsrc01)
762       .addImm(EncodedOffset) // offset
763       .addImm(0) // cpol
764       .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
765       .addMemOperand(MMO);
766 
767     // The driver will always set the SRD for wave 64 (bits 118:117 of
768     // descriptor / bits 22:21 of third sub-reg will be 0b11)
769     // If the shader is actually wave32 we have to modify the const_index_stride
770     // field of the descriptor 3rd sub-reg (bits 22:21) to 0b10 (stride=32). The
771     // reason the driver does this is that there can be cases where it presents
772     // 2 shaders with different wave size (e.g. VsFs).
773     // TODO: convert to using SCRATCH instructions or multiple SRD buffers
774     if (ST.isWave32()) {
775       const MCInstrDesc &SBitsetB32 = TII->get(AMDGPU::S_BITSET0_B32);
776       BuildMI(MBB, I, DL, SBitsetB32, Rsrc03)
777           .addImm(21)
778           .addReg(Rsrc03);
779     }
780   } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
781     assert(!ST.isAmdHsaOrMesa(Fn));
782     const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
783 
784     Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
785     Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
786 
787     // Use relocations to get the pointer, and setup the other bits manually.
788     uint64_t Rsrc23 = TII->getScratchRsrcWords23();
789 
790     if (MFI->getUserSGPRInfo().hasImplicitBufferPtr()) {
791       Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
792 
793       if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
794         const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
795 
796         BuildMI(MBB, I, DL, Mov64, Rsrc01)
797           .addReg(MFI->getImplicitBufferPtrUserSGPR())
798           .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
799       } else {
800         const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
801 
802         MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
803         auto *MMO = MF.getMachineMemOperand(
804             PtrInfo,
805             MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
806                 MachineMemOperand::MODereferenceable,
807             8, Align(4));
808         BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
809           .addReg(MFI->getImplicitBufferPtrUserSGPR())
810           .addImm(0) // offset
811           .addImm(0) // cpol
812           .addMemOperand(MMO)
813           .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
814 
815         MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
816         MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
817       }
818     } else {
819       Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
820       Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
821 
822       BuildMI(MBB, I, DL, SMovB32, Rsrc0)
823         .addExternalSymbol("SCRATCH_RSRC_DWORD0")
824         .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
825 
826       BuildMI(MBB, I, DL, SMovB32, Rsrc1)
827         .addExternalSymbol("SCRATCH_RSRC_DWORD1")
828         .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
829     }
830 
831     BuildMI(MBB, I, DL, SMovB32, Rsrc2)
832         .addImm(Lo_32(Rsrc23))
833         .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
834 
835     BuildMI(MBB, I, DL, SMovB32, Rsrc3)
836         .addImm(Hi_32(Rsrc23))
837         .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
838   } else if (ST.isAmdHsaOrMesa(Fn)) {
839     assert(PreloadedScratchRsrcReg);
840 
841     if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
842       BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
843           .addReg(PreloadedScratchRsrcReg, RegState::Kill);
844     }
845   }
846 
847   // Add the scratch wave offset into the scratch RSRC.
848   //
849   // We only want to update the first 48 bits, which is the base address
850   // pointer, without touching the adjacent 16 bits of flags. We know this add
851   // cannot carry-out from bit 47, otherwise the scratch allocation would be
852   // impossible to fit in the 48-bit global address space.
853   //
854   // TODO: Evaluate if it is better to just construct an SRD using the flat
855   // scratch init and some constants rather than update the one we are passed.
856   Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
857   Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
858 
859   // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
860   // the kernel body via inreg arguments.
861   BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
862       .addReg(ScratchRsrcSub0)
863       .addReg(ScratchWaveOffsetReg)
864       .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
865   auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
866       .addReg(ScratchRsrcSub1)
867       .addImm(0)
868       .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
869   Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
870 }
871 
872 bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
873   switch (ID) {
874   case TargetStackID::Default:
875   case TargetStackID::NoAlloc:
876   case TargetStackID::SGPRSpill:
877     return true;
878   case TargetStackID::ScalableVector:
879   case TargetStackID::WasmLocal:
880     return false;
881   }
882   llvm_unreachable("Invalid TargetStackID::Value");
883 }
884 
885 // Activate only the inactive lanes when \p EnableInactiveLanes is true.
886 // Otherwise, activate all lanes. It returns the saved exec.
887 static Register buildScratchExecCopy(LiveRegUnits &LiveUnits,
888                                      MachineFunction &MF,
889                                      MachineBasicBlock &MBB,
890                                      MachineBasicBlock::iterator MBBI,
891                                      const DebugLoc &DL, bool IsProlog,
892                                      bool EnableInactiveLanes) {
893   Register ScratchExecCopy;
894   MachineRegisterInfo &MRI = MF.getRegInfo();
895   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
896   const SIInstrInfo *TII = ST.getInstrInfo();
897   const SIRegisterInfo &TRI = TII->getRegisterInfo();
898   SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
899 
900   initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, IsProlog);
901 
902   ScratchExecCopy = findScratchNonCalleeSaveRegister(
903       MRI, LiveUnits, *TRI.getWaveMaskRegClass());
904   if (!ScratchExecCopy)
905     report_fatal_error("failed to find free scratch register");
906 
907   LiveUnits.addReg(ScratchExecCopy);
908 
909   const unsigned SaveExecOpc =
910       ST.isWave32() ? (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B32
911                                            : AMDGPU::S_OR_SAVEEXEC_B32)
912                     : (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B64
913                                            : AMDGPU::S_OR_SAVEEXEC_B64);
914   auto SaveExec =
915       BuildMI(MBB, MBBI, DL, TII->get(SaveExecOpc), ScratchExecCopy).addImm(-1);
916   SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.
917 
918   return ScratchExecCopy;
919 }
920 
// Emit the prologue stores of the callee-saved and whole-wave-mode (WWM)
// registers relative to \p FrameReg. WWM scratch registers have only their
// inactive lanes saved while WWM callee-saved VGPRs are saved in full, so
// EXEC is temporarily rewritten around the stores. The prolog/epilog SGPR
// spills are emitted afterwards; the frame pointer itself is spilled from
// \p FramePtrRegScratchCopy when that copy is valid (FP has already been
// moved into it by the caller).
void SIFrameLowering::emitCSRSpillStores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  // Spill Whole-Wave Mode VGPRs. Save only the inactive lanes of the scratch
  // registers. However, save all lanes of callee-saved VGPRs. Due to this, we
  // might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  if (!WWMScratchRegs.empty())
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog*/ true, /*EnableInactiveLanes*/ true);

  // Store each (VGPR, frame index) pair in the given list to the stack.
  auto StoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                           VGPR, FI, FrameReg);
        }
      };

  StoreWWMRegisters(WWMScratchRegs);
  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      // EXEC was already saved above; just enable all lanes for the
      // callee-saved VGPR stores.
      unsigned MovOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
      BuildMI(MBB, MBBI, DL, TII->get(MovOpc), TRI.getExec()).addImm(-1);
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ true,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  StoreWWMRegisters(WWMCalleeSavedRegs);
  if (ScratchExecCopy) {
    // Restore the original EXEC mask saved in ScratchExecCopy.
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), TRI.getExec())
        .addReg(ScratchExecCopy, RegState::Kill);
    LiveUnits.addReg(ScratchExecCopy);
  }

  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handle FP spill:
    // Skip if FP is saved to a scratch SGPR, the save has already been emitted.
    // Otherwise, FP has been moved to a temporary register and spill it
    // instead.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.save();
  }

  // If a copy to scratch SGPR has been chosen for any of the SGPR spills, make
  // such scratch registers live throughout the function.
  SmallVector<Register, 1> ScratchSGPRs;
  FuncInfo->getAllScratchSGPRCopyDstRegs(ScratchSGPRs);
  if (!ScratchSGPRs.empty()) {
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : ScratchSGPRs)
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();
    }
    if (!LiveUnits.empty()) {
      for (MCPhysReg Reg : ScratchSGPRs)
        LiveUnits.addReg(Reg);
    }
  }
}
1006 
// Emit the epilogue restores mirroring emitCSRSpillStores: first reload the
// prolog/epilog SGPR spills (the FP value is reloaded into
// \p FramePtrRegScratchCopy rather than into FP itself, so FP keeps
// addressing the current frame until all restores are done), then reload the
// WWM VGPRs with EXEC adjusted so the correct lanes are written.
void SIFrameLowering::emitCSRSpillRestores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
    Register FrameReg, Register FramePtrRegScratchCopy) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();

  for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
    // Special handle FP restore:
    // Skip if FP needs to be restored from the scratch SGPR. Otherwise, restore
    // the FP value to a temporary register. The frame pointer should be
    // overwritten only at the end when all other spills are restored from
    // current frame.
    Register Reg =
        Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
    if (!Reg)
      continue;

    PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
                                    LiveUnits, FrameReg);
    SB.restore();
  }

  // Restore Whole-Wave Mode VGPRs. Restore only the inactive lanes of the
  // scratch registers. However, restore all lanes of callee-saved VGPRs. Due to
  // this, we might end up flipping the EXEC bits twice.
  Register ScratchExecCopy;
  SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
  FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
  if (!WWMScratchRegs.empty())
    ScratchExecCopy =
        buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                             /*IsProlog*/ false, /*EnableInactiveLanes*/ true);

  // Reload each (VGPR, frame index) pair in the given list from the stack.
  auto RestoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
        for (const auto &Reg : WWMRegs) {
          Register VGPR = Reg.first;
          int FI = Reg.second;
          buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
                             VGPR, FI, FrameReg);
        }
      };

  RestoreWWMRegisters(WWMScratchRegs);
  if (!WWMCalleeSavedRegs.empty()) {
    if (ScratchExecCopy) {
      // EXEC was already saved above; just enable all lanes for the
      // callee-saved VGPR reloads.
      unsigned MovOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
      BuildMI(MBB, MBBI, DL, TII->get(MovOpc), TRI.getExec()).addImm(-1);
    } else {
      ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
                                             /*IsProlog*/ false,
                                             /*EnableInactiveLanes*/ false);
    }
  }

  RestoreWWMRegisters(WWMCalleeSavedRegs);
  if (ScratchExecCopy) {
    // Restore the original EXEC mask saved in ScratchExecCopy.
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), TRI.getExec())
        .addReg(ScratchExecCopy, RegState::Kill);
  }
}
1074 
// Emit the function prologue. Entry functions are delegated to
// emitEntryFunctionPrologue. For normal (non-entry) functions this sets up SP
// for chain functions, emits the CSR spill stores (SP-relative when no FP is
// needed, FP-relative otherwise), establishes/aligns the frame pointer, sets
// up the base pointer, and finally advances SP past the frame.
void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  Register BasePtrReg =
      TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
  LiveRegUnits LiveUnits;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  // DebugLoc must be unknown since the first instruction with DebugLoc is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (FuncInfo->isChainFunction()) {
    // Functions with the amdgpu_cs_chain[_preserve] CC don't receive a SP, but
    // are free to set one up if they need it.
    bool UseSP = requiresStackPointerReference(MF);
    if (UseSP) {
      assert(StackPtrReg != AMDGPU::SP_REG);

      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B32), StackPtrReg)
          .addImm(MFI.getStackSize() * getScratchScaleFactor(ST));
    }
  }

  bool HasFP = false;
  bool HasBP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  // Stack realignment always requires a frame pointer.
  if (TRI.hasStackRealignment(MF))
    HasFP = true;

  Register FramePtrRegScratchCopy;
  if (!HasFP && !hasFP(MF)) {
    // Emit the CSR spill stores with SP base register.
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits,
                       FuncInfo->isChainFunction() ? Register() : StackPtrReg,
                       FramePtrRegScratchCopy);
  } else {
    // CSR spill stores will use FP as base register.
    Register SGPRForFPSaveRestoreCopy =
        FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);

    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);
    if (SGPRForFPSaveRestoreCopy) {
      // Copy FP to the scratch register now and emit the CFI entry. It avoids
      // the extra FP copy needed in the other two cases when FP is spilled to
      // memory or to a VGPR lane.
      PrologEpilogSGPRSpillBuilder SB(
          FramePtrReg,
          FuncInfo->getPrologEpilogSGPRSaveRestoreInfo(FramePtrReg), MBB, MBBI,
          DL, TII, TRI, LiveUnits, FramePtrReg);
      SB.save();
      LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
    } else {
      // Copy FP into a new scratch register so that its previous value can be
      // spilled after setting up the new frame.
      FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
          MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
      if (!FramePtrRegScratchCopy)
        report_fatal_error("failed to find free scratch register");

      LiveUnits.addReg(FramePtrRegScratchCopy);
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrRegScratchCopy)
          .addReg(FramePtrReg);
    }
  }

  if (HasFP) {
    // Realign the stack: pad the frame by the max alignment and round FP up
    // to that alignment with an add/and pair.
    const unsigned Alignment = MFI.getMaxAlign().value();

    RoundedSize += Alignment;
    if (LiveUnits.empty()) {
      LiveUnits.init(TRI);
      LiveUnits.addLiveIns(MBB);
    }

    // s_add_i32 s33, s32, NumBytes
    // s_and_b32 s33, s33, 0b111...0000
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), FramePtrReg)
        .addReg(StackPtrReg)
        .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    auto And = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
        .addReg(FramePtrReg, RegState::Kill)
        .addImm(-Alignment * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    And->getOperand(3).setIsDead(); // Mark SCC as dead.
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    // No realignment needed: FP is simply the incoming SP.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If FP is used, emit the CSR spills with FP base register.
  if (HasFP) {
    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
                       FramePtrRegScratchCopy);
    if (FramePtrRegScratchCopy)
      LiveUnits.removeReg(FramePtrRegScratchCopy);
  }

  // If we need a base pointer, set it up here. It's whatever the value of
  // the stack pointer is at this point. Any variable size objects will be
  // allocated after this, so we can still use the base pointer to reference
  // the incoming arguments.
  if ((HasBP = TRI.hasBasePointer(MF))) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
        .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    // Allocate the frame: advance SP by the (realignment-padded) frame size.
    auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
        .addReg(StackPtrReg)
        .addImm(RoundedSize * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
    Add->getOperand(3).setIsDead(); // Mark SCC as dead.
  }

  bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
  (void)FPSaved;
  assert((!HasFP || FPSaved) &&
         "Needed to save FP but didn't save it anywhere");

  // If we allow spilling to AGPRs we may have saved FP but then spill
  // everything into AGPRs instead of the stack.
  assert((HasFP || !FPSaved || EnableSpillVGPRToAGPR) &&
         "Saved FP but didn't need it");

  bool BPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(BasePtrReg);
  (void)BPSaved;
  assert((!HasBP || BPSaved) &&
         "Needed to save BP but didn't save it anywhere");

  assert((HasBP || !BPSaved) && "Saved BP but didn't need it");
}
1226 
// Emit the function epilogue for non-entry functions (entry functions need
// none): restore SP from BP/FP if the frame had a nonzero size, reload the
// callee saves via emitCSRSpillRestores, and finally restore the frame
// pointer from either its scratch SGPR copy or the temporary register used
// for the reload.
void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveRegUnits LiveUnits;
  // Get the insert location for the epilogue. If there were no terminators in
  // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
  DebugLoc DL;
  if (!MBB.empty()) {
    MBBI = MBB.getLastNonDebugInstr();
    if (MBBI != MBB.end())
      DL = MBBI->getDebugLoc();

    MBBI = MBB.getFirstTerminator();
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  // Account for the extra alignment padding added by a realigned prologue.
  uint32_t RoundedSize = FuncInfo->isStackRealigned()
                             ? NumBytes + MFI.getMaxAlign().value()
                             : NumBytes;
  const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  Register FramePtrReg = FuncInfo->getFrameOffsetReg();
  bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);

  if (RoundedSize != 0) {
    // Deallocate the frame by restoring SP to its incoming value, preserved
    // in BP (preferred when present) or FP.
    if (TRI.hasBasePointer(MF)) {
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
          .addReg(TRI.getBaseRegister())
          .setMIFlag(MachineInstr::FrameDestroy);
    } else if (hasFP(MF)) {
      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
          .addReg(FramePtrReg)
          .setMIFlag(MachineInstr::FrameDestroy);
    }
  }

  Register FramePtrRegScratchCopy;
  Register SGPRForFPSaveRestoreCopy =
      FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
  if (FPSaved) {
    // CSR spill restores should use FP as base register. If
    // SGPRForFPSaveRestoreCopy is not true, restore the previous value of FP
    // into a new scratch register and copy to FP later when other registers are
    // restored from the current stack frame.
    initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);
    if (SGPRForFPSaveRestoreCopy) {
      LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
    } else {
      FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
          MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
      if (!FramePtrRegScratchCopy)
        report_fatal_error("failed to find free scratch register");

      LiveUnits.addReg(FramePtrRegScratchCopy);
    }

    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
                         FramePtrRegScratchCopy);
  }

  if (FPSaved) {
    // Insert the copy to restore FP.
    Register SrcReg = SGPRForFPSaveRestoreCopy ? SGPRForFPSaveRestoreCopy
                                               : FramePtrRegScratchCopy;
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
            .addReg(SrcReg);
    if (SGPRForFPSaveRestoreCopy)
      MIB.setMIFlag(MachineInstr::FrameDestroy);
  } else {
    // Insert the CSR spill restores with SP as the base register.
    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits,
                         FuncInfo->isChainFunction() ? Register() : StackPtrReg,
                         FramePtrRegScratchCopy);
  }
}
1311 
1312 #ifndef NDEBUG
1313 static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
1314   const MachineFrameInfo &MFI = MF.getFrameInfo();
1315   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1316   for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
1317        I != E; ++I) {
1318     if (!MFI.isDeadObjectIndex(I) &&
1319         MFI.getStackID(I) == TargetStackID::SGPRSpill &&
1320         !FuncInfo->checkIndexInPrologEpilogSGPRSpills(I)) {
1321       return false;
1322     }
1323   }
1324 
1325   return true;
1326 }
1327 #endif
1328 
1329 StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
1330                                                     int FI,
1331                                                     Register &FrameReg) const {
1332   const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
1333 
1334   FrameReg = RI->getFrameRegister(MF);
1335   return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
1336 }
1337 
// Runs before frame finalization: lower VGPR spills to AGPRs where possible
// (marking the freed frame indices dead), move any remaining SGPR spills back
// to the default stack, and reserve emergency scavenging slot(s) when live
// stack objects remain.
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();

  // VGPR-to-AGPR spilling needs MAI instructions and can be disabled via the
  // amdgpu-spill-vgpr-to-agpr option.
  const bool SpillVGPRToAGPR = ST.hasMAIInsts() && FuncInfo->hasSpilledVGPRs()
                               && EnableSpillVGPRToAGPR;

  if (SpillVGPRToAGPR) {
    // To track the spill frame indices handled in this pass.
    BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
    BitVector NonVGPRSpillFIs(MFI.getObjectIndexEnd(), false);

    bool SeenDbgInstr = false;

    for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
        int FrameIndex;
        if (MI.isDebugInstr())
          SeenDbgInstr = true;

        if (TII->isVGPRSpill(MI)) {
          // Try to eliminate stack used by VGPR spills before frame
          // finalization.
          unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                     AMDGPU::OpName::vaddr);
          int FI = MI.getOperand(FIOp).getIndex();
          Register VReg =
            TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
          if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
                                                TRI->isAGPR(MRI, VReg))) {
            assert(RS != nullptr);
            // Position the scavenger just after MI so eliminateFrameIndex can
            // scavenge a register if it needs one.
            RS->enterBasicBlockEnd(MBB);
            RS->backward(std::next(MI.getIterator()));
            TRI->eliminateFrameIndex(MI, 0, FIOp, RS);
            SpillFIs.set(FI);
            continue;
          }
        } else if (TII->isStoreToStackSlot(MI, FrameIndex) ||
                   TII->isLoadFromStackSlot(MI, FrameIndex))
          if (!MFI.isFixedObjectIndex(FrameIndex))
            NonVGPRSpillFIs.set(FrameIndex);
      }
    }

    // Stack slot coloring may assign different objects to the same stack slot.
    // If not, then the VGPR to AGPR spill slot is dead.
    for (unsigned FI : SpillFIs.set_bits())
      if (!NonVGPRSpillFIs.test(FI))
        FuncInfo->setVGPRToAGPRSpillDead(FI);

    // The AGPRs (and the VGPRs used to copy through them) now carry values
    // across blocks, so record them as live-ins everywhere.
    for (MachineBasicBlock &MBB : MF) {
      for (MCPhysReg Reg : FuncInfo->getVGPRSpillAGPRs())
        MBB.addLiveIn(Reg);

      for (MCPhysReg Reg : FuncInfo->getAGPRSpillVGPRs())
        MBB.addLiveIn(Reg);

      MBB.sortUniqueLiveIns();

      if (!SpillFIs.empty() && SeenDbgInstr) {
        // FIXME: The dead frame indices are replaced with a null register from
        // the debug value instructions. We should instead update them with the
        // correct register value. But it is unclear whether the register value
        // alone is sufficient here - needs investigation.
        for (MachineInstr &MI : MBB) {
          if (MI.isDebugValue()) {
            uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
            if (MI.getOperand(StackOperandIdx).isFI() &&
                !MFI.isFixedObjectIndex(
                    MI.getOperand(StackOperandIdx).getIndex()) &&
                SpillFIs[MI.getOperand(StackOperandIdx).getIndex()]) {
              MI.getOperand(StackOperandIdx)
                  .ChangeToRegister(Register(), false /*isDef*/);
            }
          }
        }
      }
    }
  }

  // At this point we've already allocated all spilled SGPRs to VGPRs if we
  // can. Any remaining SGPR spills will go to memory, so move them back to the
  // default stack.
  bool HaveSGPRToVMemSpill =
      FuncInfo->removeDeadFrameIndices(MFI, /*ResetSGPRSpillStackIDs*/ true);
  assert(allSGPRSpillsAreDead(MF) &&
         "SGPR spill should have been removed in SILowerSGPRSpills");

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (!allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // Add an emergency spill slot
    RS->addScavengingFrameIndex(FuncInfo->getScavengeFI(MFI, *TRI));

    // If we are spilling SGPRs to memory with a large frame, we may need a
    // second VGPR emergency frame index.
    if (HaveSGPRToVMemSpill &&
        allocateScavengingFrameIndexesNearIncomingSP(MF)) {
      RS->addScavengingFrameIndex(MFI.CreateSpillStackObject(4, Align(4)));
    }
  }
}
1449 
1450 void SIFrameLowering::processFunctionBeforeFrameIndicesReplaced(
1451     MachineFunction &MF, RegScavenger *RS) const {
1452   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1453   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1454   MachineRegisterInfo &MRI = MF.getRegInfo();
1455   SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1456 
1457   if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
1458     // On gfx908, we had initially reserved highest available VGPR for AGPR
1459     // copy. Now since we are done with RA, check if there exist an unused VGPR
1460     // which is lower than the eariler reserved VGPR before RA. If one exist,
1461     // use it for AGPR copy instead of one reserved before RA.
1462     Register VGPRForAGPRCopy = FuncInfo->getVGPRForAGPRCopy();
1463     Register UnusedLowVGPR =
1464         TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
1465     if (UnusedLowVGPR && (TRI->getHWRegIndex(UnusedLowVGPR) <
1466                           TRI->getHWRegIndex(VGPRForAGPRCopy))) {
1467       // Reserve this newly identified VGPR (for AGPR copy)
1468       // reserved registers should already be frozen at this point
1469       // so we can avoid calling MRI.freezeReservedRegs and just use
1470       // MRI.reserveReg
1471       FuncInfo->setVGPRForAGPRCopy(UnusedLowVGPR);
1472       MRI.reserveReg(UnusedLowVGPR, TRI);
1473     }
1474   }
1475   // We initally reserved the highest available SGPR pair for long branches
1476   // now, after RA, we shift down to a lower unused one if one exists
1477   Register LongBranchReservedReg = FuncInfo->getLongBranchReservedReg();
1478   Register UnusedLowSGPR =
1479       TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_64RegClass, MF);
1480   // If LongBranchReservedReg is null then we didn't find a long branch
1481   // and never reserved a register to begin with so there is nothing to
1482   // shift down. Then if UnusedLowSGPR is null, there isn't available lower
1483   // register to use so just keep the original one we set.
1484   if (LongBranchReservedReg && UnusedLowSGPR) {
1485     FuncInfo->setLongBranchReservedReg(UnusedLowSGPR);
1486     MRI.reserveReg(UnusedLowSGPR, TRI);
1487   }
1488 }
1489 
// The special SGPR spills like the one needed for FP, BP or any reserved
// registers delayed until frame lowering.
//
// Decides, for each special SGPR (EXEC copy, FP, BP), whether it can live in
// an unused scratch SGPR for the whole function or must be spilled to a VGPR
// lane / memory, and records that choice in SIMachineFunctionInfo.
void SIFrameLowering::determinePrologEpilogSGPRSaves(
    MachineFunction &MF, BitVector &SavedVGPRs,
    bool NeedExecCopyReservedReg) const {
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  LiveRegUnits LiveUnits;
  LiveUnits.init(*TRI);
  // Initially mark callee saved registers as used so we will not choose them
  // while looking for scratch SGPRs.
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
  for (unsigned I = 0; CSRegs[I]; ++I)
    LiveUnits.addReg(CSRegs[I]);

  // The EXEC copy needs a wave-mask-sized register (SGPR_32 for wave32,
  // SGPR_64 for wave64).
  const TargetRegisterClass &RC = *TRI->getWaveMaskRegClass();

  Register ReservedRegForExecCopy = MFI->getSGPRForEXECCopy();
  if (NeedExecCopyReservedReg ||
      (ReservedRegForExecCopy &&
       MRI.isPhysRegUsed(ReservedRegForExecCopy, /*SkipRegMaskTest=*/true))) {
    MRI.reserveReg(ReservedRegForExecCopy, TRI);
    Register UnusedScratchReg = findUnusedRegister(MRI, LiveUnits, RC);
    if (UnusedScratchReg) {
      // If found any unused scratch SGPR, reserve the register itself for Exec
      // copy and there is no need for any spills in that case.
      MFI->setSGPRForEXECCopy(UnusedScratchReg);
      MRI.replaceRegWith(ReservedRegForExecCopy, UnusedScratchReg);
      LiveUnits.addReg(UnusedScratchReg);
    } else {
      // Needs spill. No free scratch SGPR exists, so the EXEC copy register
      // must be saved via a VGPR lane (no scratch-SGPR copy fallback here).
      assert(!MFI->hasPrologEpilogSGPRSpillEntry(ReservedRegForExecCopy) &&
             "Re-reserving spill slot for EXEC copy register");
      getVGPRSpillLaneOrTempRegister(MF, LiveUnits, ReservedRegForExecCopy, RC,
                                     /*IncludeScratchCopy=*/false);
    }
  } else if (ReservedRegForExecCopy) {
    // Reset it at this point. There are no whole-wave copies and spills
    // encountered.
    MFI->setSGPRForEXECCopy(AMDGPU::NoRegister);
  }

  // hasFP only knows about stack objects that already exist. We're now
  // determining the stack slots that will be created, so we have to predict
  // them. Stack objects force FP usage with calls.
  //
  // Note a new VGPR CSR may be introduced if one is used for the spill, but we
  // don't want to report it here.
  //
  // FIXME: Is this really hasReservedCallFrame?
  const bool WillHaveFP =
      FrameInfo.hasCalls() &&
      (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));

  if (WillHaveFP || hasFP(MF)) {
    Register FramePtrReg = MFI->getFrameOffsetReg();
    assert(!MFI->hasPrologEpilogSGPRSpillEntry(FramePtrReg) &&
           "Re-reserving spill slot for FP");
    getVGPRSpillLaneOrTempRegister(MF, LiveUnits, FramePtrReg);
  }

  if (TRI->hasBasePointer(MF)) {
    Register BasePtrReg = TRI->getBaseRegister();
    assert(!MFI->hasPrologEpilogSGPRSpillEntry(BasePtrReg) &&
           "Re-reserving spill slot for BP");
    getVGPRSpillLaneOrTempRegister(MF, LiveUnits, BasePtrReg);
  }
}
1561 
// Only report VGPRs to generic code.
void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedVGPRs,
                                           RegScavenger *RS) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If this is a function with the amdgpu_cs_chain[_preserve] calling
  // convention and it doesn't contain any calls to llvm.amdgcn.cs.chain, then
  // we don't need to save and restore anything.
  if (MFI->isChainFunction() && !MF.getFrameInfo().hasTailCall())
    return;

  TargetFrameLowering::determineCalleeSaves(MF, SavedVGPRs, RS);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool NeedExecCopyReservedReg = false;

  // Scan the whole function: note whether any WWM register spill occurs (it
  // requires an SGPR reserved for copying EXEC), and remember a return
  // instruction so its operand registers can be excluded from the saves.
  MachineInstr *ReturnMI = nullptr;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // TODO: Walking through all MBBs here would be a bad heuristic. Better
      // handle them elsewhere.
      if (TII->isWWMRegSpillOpcode(MI.getOpcode()))
        NeedExecCopyReservedReg = true;
      else if (MI.getOpcode() == AMDGPU::SI_RETURN ||
               MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
               (MFI->isChainFunction() &&
                TII->isChainCallOpcode(MI.getOpcode()))) {
        // We expect all returns to have the same number of register operands,
        // so remembering any one of them is sufficient.
        assert(!ReturnMI ||
               (count_if(MI.operands(), [](auto Op) { return Op.isReg(); }) ==
                count_if(ReturnMI->operands(), [](auto Op) { return Op.isReg(); })));
        ReturnMI = &MI;
      }
    }
  }

  SmallVector<Register> SortedWWMVGPRs;
  for (Register Reg : MFI->getWWMReservedRegs()) {
    // The shift-back is needed only for the VGPRs used for SGPR spills and they
    // are of 32-bit size. SIPreAllocateWWMRegs pass can add tuples into WWM
    // reserved registers.
    const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
    if (TRI->getRegSizeInBits(*RC) > 32)
      continue;
    SortedWWMVGPRs.push_back(Reg);
  }

  // Sort in descending order so the shift-back proceeds from the highest
  // reserved WWM VGPR downwards.
  sort(SortedWWMVGPRs, std::greater<Register>());
  MFI->shiftWwmVGPRsToLowestRange(MF, SortedWWMVGPRs, SavedVGPRs);

  if (MFI->isEntryFunction())
    return;

  // Remove any VGPRs used in the return value because these do not need to be saved.
  // This prevents CSR restore from clobbering return VGPRs.
  if (ReturnMI) {
    for (auto &Op : ReturnMI->operands()) {
      if (Op.isReg())
        SavedVGPRs.reset(Op.getReg());
    }
  }

  // Create the stack objects for WWM registers now.
  for (Register Reg : MFI->getWWMReservedRegs()) {
    const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
    MFI->allocateWWMSpill(MF, Reg, TRI->getSpillSize(*RC),
                          TRI->getSpillAlign(*RC));
  }

  // Ignore the SGPRs the default implementation found.
  SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());

  // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
  // On gfx908 there were no direct AGPR loads and stores, and thus spilling
  // would also require a temporary VGPR.
  if (!ST.hasGFX90AInsts())
    SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());

  determinePrologEpilogSGPRSaves(MF, SavedVGPRs, NeedExecCopyReservedReg);

  // The Whole-Wave VGPRs need to be specially inserted in the prolog, so don't
  // allow the default insertion to handle them.
  for (auto &Reg : MFI->getWWMSpills())
    SavedVGPRs.reset(Reg.first);
}
1650 
1651 void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
1652                                                BitVector &SavedRegs,
1653                                                RegScavenger *RS) const {
1654   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1655   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1656   if (MFI->isEntryFunction())
1657     return;
1658 
1659   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1660   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1661 
1662   // The SP is specifically managed and we don't want extra spills of it.
1663   SavedRegs.reset(MFI->getStackPtrOffsetReg());
1664 
1665   const BitVector AllSavedRegs = SavedRegs;
1666   SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());
1667 
1668   // We have to anticipate introducing CSR VGPR spills or spill of caller
1669   // save VGPR reserved for SGPR spills as we now always create stack entry
1670   // for it, if we don't have any stack objects already, since we require a FP
1671   // if there is a call and stack. We will allocate a VGPR for SGPR spills if
1672   // there are any SGPR spills. Whether they are CSR spills or otherwise.
1673   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1674   const bool WillHaveFP =
1675       FrameInfo.hasCalls() && (AllSavedRegs.any() || MFI->hasSpilledSGPRs());
1676 
1677   // FP will be specially managed like SP.
1678   if (WillHaveFP || hasFP(MF))
1679     SavedRegs.reset(MFI->getFrameOffsetReg());
1680 
1681   // Return address use with return instruction is hidden through the SI_RETURN
1682   // pseudo. Given that and since the IPRA computes actual register usage and
1683   // does not use CSR list, the clobbering of return address by function calls
1684   // (D117243) or otherwise (D120922) is ignored/not seen by the IPRA's register
1685   // usage collection. This will ensure save/restore of return address happens
1686   // in those scenarios.
1687   const MachineRegisterInfo &MRI = MF.getRegInfo();
1688   Register RetAddrReg = TRI->getReturnAddressReg(MF);
1689   if (!MFI->isEntryFunction() &&
1690       (FrameInfo.hasCalls() || MRI.isPhysRegModified(RetAddrReg))) {
1691     SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub0));
1692     SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub1));
1693   }
1694 }
1695 
1696 bool SIFrameLowering::assignCalleeSavedSpillSlots(
1697     MachineFunction &MF, const TargetRegisterInfo *TRI,
1698     std::vector<CalleeSavedInfo> &CSI) const {
1699   if (CSI.empty())
1700     return true; // Early exit if no callee saved registers are modified!
1701 
1702   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1703   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1704   const SIRegisterInfo *RI = ST.getRegisterInfo();
1705   Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1706   Register BasePtrReg = RI->getBaseRegister();
1707   Register SGPRForFPSaveRestoreCopy =
1708       FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
1709   Register SGPRForBPSaveRestoreCopy =
1710       FuncInfo->getScratchSGPRCopyDstReg(BasePtrReg);
1711   if (!SGPRForFPSaveRestoreCopy && !SGPRForBPSaveRestoreCopy)
1712     return false;
1713 
1714   unsigned NumModifiedRegs = 0;
1715 
1716   if (SGPRForFPSaveRestoreCopy)
1717     NumModifiedRegs++;
1718   if (SGPRForBPSaveRestoreCopy)
1719     NumModifiedRegs++;
1720 
1721   for (auto &CS : CSI) {
1722     if (CS.getReg() == FramePtrReg && SGPRForFPSaveRestoreCopy) {
1723       CS.setDstReg(SGPRForFPSaveRestoreCopy);
1724       if (--NumModifiedRegs)
1725         break;
1726     } else if (CS.getReg() == BasePtrReg && SGPRForBPSaveRestoreCopy) {
1727       CS.setDstReg(SGPRForBPSaveRestoreCopy);
1728       if (--NumModifiedRegs)
1729         break;
1730     }
1731   }
1732 
1733   return false;
1734 }
1735 
1736 bool SIFrameLowering::allocateScavengingFrameIndexesNearIncomingSP(
1737   const MachineFunction &MF) const {
1738 
1739   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1740   const MachineFrameInfo &MFI = MF.getFrameInfo();
1741   const SIInstrInfo *TII = ST.getInstrInfo();
1742   uint64_t EstStackSize = MFI.estimateStackSize(MF);
1743   uint64_t MaxOffset = EstStackSize - 1;
1744 
1745   // We need the emergency stack slots to be allocated in range of the
1746   // MUBUF/flat scratch immediate offset from the base register, so assign these
1747   // first at the incoming SP position.
1748   //
1749   // TODO: We could try sorting the objects to find a hole in the first bytes
1750   // rather than allocating as close to possible. This could save a lot of space
1751   // on frames with alignment requirements.
1752   if (ST.enableFlatScratch()) {
1753     if (TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1754                                SIInstrFlags::FlatScratch))
1755       return false;
1756   } else {
1757     if (TII->isLegalMUBUFImmOffset(MaxOffset))
1758       return false;
1759   }
1760 
1761   return true;
1762 }
1763 
1764 MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
1765   MachineFunction &MF,
1766   MachineBasicBlock &MBB,
1767   MachineBasicBlock::iterator I) const {
1768   int64_t Amount = I->getOperand(0).getImm();
1769   if (Amount == 0)
1770     return MBB.erase(I);
1771 
1772   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1773   const SIInstrInfo *TII = ST.getInstrInfo();
1774   const DebugLoc &DL = I->getDebugLoc();
1775   unsigned Opc = I->getOpcode();
1776   bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
1777   uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
1778 
1779   if (!hasReservedCallFrame(MF)) {
1780     Amount = alignTo(Amount, getStackAlign());
1781     assert(isUInt<32>(Amount) && "exceeded stack address space size");
1782     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1783     Register SPReg = MFI->getStackPtrOffsetReg();
1784 
1785     Amount *= getScratchScaleFactor(ST);
1786     if (IsDestroy)
1787       Amount = -Amount;
1788     auto Add = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SPReg)
1789         .addReg(SPReg)
1790         .addImm(Amount);
1791     Add->getOperand(3).setIsDead(); // Mark SCC as dead.
1792   } else if (CalleePopAmount != 0) {
1793     llvm_unreachable("is this used?");
1794   }
1795 
1796   return MBB.erase(I);
1797 }
1798 
1799 /// Returns true if the frame will require a reference to the stack pointer.
1800 ///
1801 /// This is the set of conditions common to setting up the stack pointer in a
1802 /// kernel, and for using a frame pointer in a callable function.
1803 ///
1804 /// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
1805 /// references SP.
1806 static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
1807   return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
1808 }
1809 
1810 // The FP for kernels is always known 0, so we never really need to setup an
1811 // explicit register for it. However, DisableFramePointerElim will force us to
1812 // use a register for it.
1813 bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
1814   const MachineFrameInfo &MFI = MF.getFrameInfo();
1815 
1816   // For entry & chain functions we can use an immediate offset in most cases,
1817   // so the presence of calls doesn't imply we need a distinct frame pointer.
1818   if (MFI.hasCalls() &&
1819       !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
1820       !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) {
1821     // All offsets are unsigned, so need to be addressed in the same direction
1822     // as stack growth.
1823 
1824     // FIXME: This function is pretty broken, since it can be called before the
1825     // frame layout is determined or CSR spills are inserted.
1826     return MFI.getStackSize() != 0;
1827   }
1828 
1829   return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
1830          MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
1831              MF) ||
1832          MF.getTarget().Options.DisableFramePointerElim(MF);
1833 }
1834 
1835 // This is essentially a reduced version of hasFP for entry functions. Since the
1836 // stack pointer is known 0 on entry to kernels, we never really need an FP
1837 // register. We may need to initialize the stack pointer depending on the frame
1838 // properties, which logically overlaps many of the cases where an ordinary
1839 // function would require an FP.
1840 // Also used for chain functions. While not technically entry functions, chain
1841 // functions may need to set up a stack pointer in some situations.
1842 bool SIFrameLowering::requiresStackPointerReference(
1843     const MachineFunction &MF) const {
1844   // Callable functions always require a stack pointer reference.
1845   assert((MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() ||
1846           MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) &&
1847          "only expected to call this for entry points and chain functions");
1848 
1849   const MachineFrameInfo &MFI = MF.getFrameInfo();
1850 
1851   // Entry points ordinarily don't need to initialize SP. We have to set it up
1852   // for callees if there are any. Also note tail calls are impossible/don't
1853   // make any sense for kernels.
1854   if (MFI.hasCalls())
1855     return true;
1856 
1857   // We still need to initialize the SP if we're doing anything weird that
1858   // references the SP, like variable sized stack objects.
1859   return frameTriviallyRequiresSP(MFI);
1860 }
1861