xref: /llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp (revision b1d42465fc1485d46b4727e6830272f369fb6cb5)
1 //===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUCallLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPULegalizerInfo.h"
18 #include "SIMachineFunctionInfo.h"
19 #include "SIRegisterInfo.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/FunctionLoweringInfo.h"
22 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/IR/IntrinsicsAMDGPU.h"
25 
26 #define DEBUG_TYPE "amdgpu-call-lowering"
27 
28 using namespace llvm;
29 
30 namespace {
31 
32 /// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
33 static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
34                                     Register ValVReg, const CCValAssign &VA) {
35   if (VA.getLocVT().getSizeInBits() < 32) {
36     // 16-bit types are reported as legal for 32-bit registers. We need to
37     // extend and do a 32-bit copy to avoid the verifier complaining about it.
38     return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
39   }
40 
41   return Handler.extendRegister(ValVReg, VA);
42 }
43 
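// Handles outgoing values assigned to registers (function returns, and call
// arguments via the derived AMDGPUOutgoingArgHandler): the value is extended
// to at least 32 bits, copied into its assigned physical register (through a
// readfirstlane when the destination is an SGPR), and recorded as an implicit
// use of the return/call instruction. Stack assignments are unimplemented here.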
44 struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
45   AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
46                              MachineInstrBuilder MIB)
47       : OutgoingValueHandler(B, MRI), MIB(MIB) {}
48 
49   MachineInstrBuilder MIB;
50 
51   Register getStackAddress(uint64_t Size, int64_t Offset,
52                            MachinePointerInfo &MPO,
53                            ISD::ArgFlagsTy Flags) override {
54     llvm_unreachable("not implemented");
55   }
56 
57   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
58                             const MachinePointerInfo &MPO,
59                             const CCValAssign &VA) override {
60     llvm_unreachable("not implemented");
61   }
62 
63   void assignValueToReg(Register ValVReg, Register PhysReg,
64                         const CCValAssign &VA) override {
65     Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
66 
67     // If this is a scalar return, insert a readfirstlane just in case the value
68     // ends up in a VGPR.
69     // FIXME: Assert this is a shader return.
70     const SIRegisterInfo *TRI
71       = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
72     if (TRI->isSGPRReg(MRI, PhysReg)) {
73       LLT Ty = MRI.getType(ExtReg);
74       LLT S32 = LLT::scalar(32);
75       if (Ty != S32) {
76         // FIXME: We should probably support readfirstlane intrinsics with all
77         // legal 32-bit types.
78         assert(Ty.getSizeInBits() == 32);
79         if (Ty.isPointer())
80           ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0);
81         else
82           ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0);
83       }
84 
85       auto ToSGPR = MIRBuilder
86                         .buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
87                                         {MRI.getType(ExtReg)})
88                         .addReg(ExtReg);
89       ExtReg = ToSGPR.getReg(0);
90     }
91 
92     MIRBuilder.buildCopy(PhysReg, ExtReg);
93     MIB.addUse(PhysReg, RegState::Implicit);
94   }
95 };
96 
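// Common handler for incoming values (formal arguments and call results).
// Sub-32-bit register values are received as a full 32-bit copy and then
// truncated; stack-passed values are loaded from fixed frame objects in the
// private address space. StackUsed tracks the size of the incoming stack
// argument area.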
97 struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
98   uint64_t StackUsed = 0;
99 
100   AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
101       : IncomingValueHandler(B, MRI) {}
102 
103   Register getStackAddress(uint64_t Size, int64_t Offset,
104                            MachinePointerInfo &MPO,
105                            ISD::ArgFlagsTy Flags) override {
106     auto &MFI = MIRBuilder.getMF().getFrameInfo();
107 
108     // Byval is assumed to be writable memory, but other stack passed arguments
109     // are not.
110     const bool IsImmutable = !Flags.isByVal();
111     int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
112     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
113     auto AddrReg = MIRBuilder.buildFrameIndex(
114         LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
115     StackUsed = std::max(StackUsed, Size + Offset);
116     return AddrReg.getReg(0);
117   }
118 
119   void assignValueToReg(Register ValVReg, Register PhysReg,
120                         const CCValAssign &VA) override {
121     markPhysRegUsed(PhysReg);
122 
123     if (VA.getLocVT().getSizeInBits() < 32) {
124       // 16-bit types are reported as legal for 32-bit registers. We need to do
125       // a 32-bit copy, and truncate to avoid the verifier complaining about it.
126       auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
127 
128       // If we have signext/zeroext, it applies to the whole 32-bit register
129       // before truncation.
130       auto Extended =
131           buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
132       MIRBuilder.buildTrunc(ValVReg, Extended);
133       return;
134     }
135 
136     IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
137   }
138 
139   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
140                             const MachinePointerInfo &MPO,
141                             const CCValAssign &VA) override {
142     MachineFunction &MF = MIRBuilder.getMF();
143 
144     auto *MMO = MF.getMachineMemOperand(
145         MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
146         inferAlignFromPtrInfo(MF, MPO));
147     MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
148   }
149 
150   /// How the physical register gets marked varies between formal
151   /// parameters (it's a basic-block live-in) and a call instruction (it's an
152   /// implicit-def of the call instruction).
153   virtual void markPhysRegUsed(unsigned PhysReg) = 0;
154 };
155 
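// Incoming handler for formal arguments: each assigned physical register is
// marked as a live-in of the current basic block.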
156 struct FormalArgHandler : public AMDGPUIncomingArgHandler {
157   FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
158       : AMDGPUIncomingArgHandler(B, MRI) {}
159 
160   void markPhysRegUsed(unsigned PhysReg) override {
161     MIRBuilder.getMBB().addLiveIn(PhysReg);
162   }
163 };
164 
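// Incoming handler for call results: each returning physical register is
// added as an implicit def of the call instruction.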
165 struct CallReturnHandler : public AMDGPUIncomingArgHandler {
166   CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
167                     MachineInstrBuilder MIB)
168       : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
169 
170   void markPhysRegUsed(unsigned PhysReg) override {
171     MIB.addDef(PhysReg, RegState::Implicit);
172   }
173 
174   MachineInstrBuilder MIB;
175 };
176 
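// Outgoing handler for call arguments. Register arguments go through the base
// class; stack arguments are stored either into fixed frame objects (for tail
// calls, offset by FPDiff) or at an offset from the stack pointer, which is
// converted to a swizzled wave address when flat scratch is disabled.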
177 struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
178   /// For tail calls, the byte offset of the call's argument area from the
179   /// callee's. Unused elsewhere.
180   int FPDiff;
181 
182   // Cache the SP register vreg if we need it more than once in this call site.
183   Register SPReg;
184 
185   bool IsTailCall;
186 
187   AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
188                            MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
189                            bool IsTailCall = false, int FPDiff = 0)
190       : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
191         IsTailCall(IsTailCall) {}
192 
193   Register getStackAddress(uint64_t Size, int64_t Offset,
194                            MachinePointerInfo &MPO,
195                            ISD::ArgFlagsTy Flags) override {
196     MachineFunction &MF = MIRBuilder.getMF();
197     const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
198     const LLT S32 = LLT::scalar(32);
199 
200     if (IsTailCall) {
201       Offset += FPDiff;
202       int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
203       auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
204       MPO = MachinePointerInfo::getFixedStack(MF, FI);
205       return FIReg.getReg(0);
206     }
207 
208     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
209 
210     if (!SPReg) {
211       const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
212       if (ST.enableFlatScratch()) {
213         // The stack is accessed unswizzled, so we can use a regular copy.
214         SPReg = MIRBuilder.buildCopy(PtrTy,
215                                      MFI->getStackPtrOffsetReg()).getReg(0);
216       } else {
217         // The address we produce here, without knowing the use context, is going
218         // to be interpreted as a vector address, so we need to convert to a
219         // swizzled address.
220         SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
221                                       {MFI->getStackPtrOffsetReg()}).getReg(0);
222       }
223     }
224 
225     auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);
226 
227     auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
228     MPO = MachinePointerInfo::getStack(MF, Offset);
229     return AddrReg.getReg(0);
230   }
231 
232   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
233                             const MachinePointerInfo &MPO,
234                             const CCValAssign &VA) override {
235     MachineFunction &MF = MIRBuilder.getMF();
236     uint64_t LocMemOffset = VA.getLocMemOffset();
237     const auto &ST = MF.getSubtarget<GCNSubtarget>();
238 
239     auto *MMO = MF.getMachineMemOperand(
240         MPO, MachineMemOperand::MOStore, MemTy,
241         commonAlignment(ST.getStackAlignment(), LocMemOffset));
242     MIRBuilder.buildStore(ValVReg, Addr, *MMO);
243   }
244 
245   void assignValueToAddress(const CallLowering::ArgInfo &Arg,
246                             unsigned ValRegIndex, Register Addr, LLT MemTy,
247                             const MachinePointerInfo &MPO,
248                             const CCValAssign &VA) override {
249     Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
250                            ? extendRegister(Arg.Regs[ValRegIndex], VA)
251                            : Arg.Regs[ValRegIndex];
252     assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
253   }
254 };
255 } // anonymous namespace
256 
257 AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
258   : CallLowering(&TLI) {
259 }
260 
261 // FIXME: Compatibility shim
262 static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
263   switch (MIOpc) {
264   case TargetOpcode::G_SEXT:
265     return ISD::SIGN_EXTEND;
266   case TargetOpcode::G_ZEXT:
267     return ISD::ZERO_EXTEND;
268   case TargetOpcode::G_ANYEXT:
269     return ISD::ANY_EXTEND;
270   default:
271     llvm_unreachable("not an extend opcode");
272   }
273 }
274 
275 bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
276                                         CallingConv::ID CallConv,
277                                         SmallVectorImpl<BaseArgInfo> &Outs,
278                                         bool IsVarArg) const {
279   // Entry functions (shaders and kernels) can always lower their return. Vector types should be explicitly handled by the CC.
280   if (AMDGPU::isEntryFunctionCC(CallConv))
281     return true;
282 
283   SmallVector<CCValAssign, 16> ArgLocs;
284   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
285   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
286                  MF.getFunction().getContext());
287 
288   return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
289 }
290 
291 /// Lower the return value for the already existing \p Ret. This assumes that
292 /// \p B's insertion point is correct.
293 bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
294                                         const Value *Val, ArrayRef<Register> VRegs,
295                                         MachineInstrBuilder &Ret) const {
296   if (!Val)
297     return true;
298 
299   auto &MF = B.getMF();
300   const auto &F = MF.getFunction();
301   const DataLayout &DL = MF.getDataLayout();
302   MachineRegisterInfo *MRI = B.getMRI();
303   LLVMContext &Ctx = F.getContext();
304 
305   CallingConv::ID CC = F.getCallingConv();
306   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
307 
308   SmallVector<EVT, 8> SplitEVTs;
309   ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
310   assert(VRegs.size() == SplitEVTs.size() &&
311          "For each split Type there should be exactly one VReg.");
312 
313   SmallVector<ArgInfo, 8> SplitRetInfos;
314 
315   for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
316     EVT VT = SplitEVTs[i];
317     Register Reg = VRegs[i];
318     ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
319     setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
320 
321     if (VT.isScalarInteger()) {
322       unsigned ExtendOp = TargetOpcode::G_ANYEXT;
323       if (RetInfo.Flags[0].isSExt()) {
324         assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
325         ExtendOp = TargetOpcode::G_SEXT;
326       } else if (RetInfo.Flags[0].isZExt()) {
327         assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
328         ExtendOp = TargetOpcode::G_ZEXT;
329       }
330 
331       EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
332                                           extOpcodeToISDExtOpcode(ExtendOp));
333       if (ExtVT != VT) {
334         RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
335         LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
336         Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
337       }
338     }
339 
340     if (Reg != RetInfo.Regs[0]) {
341       RetInfo.Regs[0] = Reg;
342       // Reset the arg flags after modifying Reg.
343       setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
344     }
345 
346     splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
347   }
348 
349   CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
350 
351   OutgoingValueAssigner Assigner(AssignFn);
352   AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
353   return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
354                                        CC, F.isVarArg());
355 }
356 
357 bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
358                                      ArrayRef<Register> VRegs,
359                                      FunctionLoweringInfo &FLI) const {
360 
361   MachineFunction &MF = B.getMF();
362   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
363   MFI->setIfReturnsVoid(!Val);
364 
365   assert(!Val == VRegs.empty() && "Return value without a vreg");
366 
367   CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
368   const bool IsShader = AMDGPU::isShader(CC);
369   const bool IsWaveEnd =
370       (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
371   if (IsWaveEnd) {
372     B.buildInstr(AMDGPU::S_ENDPGM)
373       .addImm(0);
374     return true;
375   }
376 
377   unsigned ReturnOpc =
378       IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
379   auto Ret = B.buildInstrNoInsert(ReturnOpc);
380 
381   if (!FLI.CanLowerReturn)
382     insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
383   else if (!lowerReturnVal(B, Val, VRegs, Ret))
384     return false;
385 
386   // TODO: Handle CalleeSavedRegsViaCopy.
387 
388   B.insertInstr(Ret);
389   return true;
390 }
391 
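// Materialize a pointer to a kernel argument: the preloaded kernarg segment
// base pointer plus a constant byte offset.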
392 void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
393                                            uint64_t Offset) const {
394   MachineFunction &MF = B.getMF();
395   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
396   MachineRegisterInfo &MRI = MF.getRegInfo();
397   Register KernArgSegmentPtr =
398     MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
399   Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
400 
401   auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);
402 
403   B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
404 }
405 
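// Load one formal kernel argument from the kernarg segment: the argument is
// split into its component value types, and each piece is loaded from the
// constant address space with an invariant, dereferenceable load at its field
// offset.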
406 void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
407                                         uint64_t Offset,
408                                         Align Alignment) const {
409   MachineFunction &MF = B.getMF();
410   const Function &F = MF.getFunction();
411   const DataLayout &DL = F.getDataLayout();
412   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
413 
414   LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
415 
416   SmallVector<ArgInfo, 32> SplitArgs;
417   SmallVector<uint64_t> FieldOffsets;
418   splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);
419 
420   unsigned Idx = 0;
421   for (ArgInfo &SplitArg : SplitArgs) {
422     Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
423     lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);
424 
425     LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
426     if (SplitArg.Flags[0].isPointer()) {
427       // Compensate for losing pointeriness in splitValueTypes.
428       LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
429                                ArgTy.getScalarSizeInBits());
430       ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
431                                : PtrTy;
432     }
433 
434     MachineMemOperand *MMO = MF.getMachineMemOperand(
435         PtrInfo,
436         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
437             MachineMemOperand::MOInvariant,
438         ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));
439 
440     assert(SplitArg.Regs.size() == 1);
441 
442     B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
443     ++Idx;
444   }
445 }
446 
447 // Allocate special inputs passed in user SGPRs.
448 static void allocateHSAUserSGPRs(CCState &CCInfo,
449                                  MachineIRBuilder &B,
450                                  MachineFunction &MF,
451                                  const SIRegisterInfo &TRI,
452                                  SIMachineFunctionInfo &Info) {
453   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
454   const GCNUserSGPRUsageInfo &UserSGPRInfo = Info.getUserSGPRInfo();
455   if (UserSGPRInfo.hasPrivateSegmentBuffer()) {
456     Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
457     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
458     CCInfo.AllocateReg(PrivateSegmentBufferReg);
459   }
460 
461   if (UserSGPRInfo.hasDispatchPtr()) {
462     Register DispatchPtrReg = Info.addDispatchPtr(TRI);
463     MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
464     CCInfo.AllocateReg(DispatchPtrReg);
465   }
466 
467   if (UserSGPRInfo.hasQueuePtr()) {
468     Register QueuePtrReg = Info.addQueuePtr(TRI);
469     MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
470     CCInfo.AllocateReg(QueuePtrReg);
471   }
472 
473   if (UserSGPRInfo.hasKernargSegmentPtr()) {
474     MachineRegisterInfo &MRI = MF.getRegInfo();
475     Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
476     const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
477     Register VReg = MRI.createGenericVirtualRegister(P4);
478     MRI.addLiveIn(InputPtrReg, VReg);
479     B.getMBB().addLiveIn(InputPtrReg);
480     B.buildCopy(VReg, InputPtrReg);
481     CCInfo.AllocateReg(InputPtrReg);
482   }
483 
484   if (UserSGPRInfo.hasDispatchID()) {
485     Register DispatchIDReg = Info.addDispatchID(TRI);
486     MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
487     CCInfo.AllocateReg(DispatchIDReg);
488   }
489 
490   if (UserSGPRInfo.hasFlatScratchInit()) {
491     Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
492     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
493     CCInfo.AllocateReg(FlatScratchInitReg);
494   }
495 
496   // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
497   // these from the dispatch pointer.
498 }
499 
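// Kernel (entry-point) arguments are not passed in registers. After the HSA
// user SGPRs are allocated, each IR argument is loaded from the kernarg
// segment at its ABI-computed offset; byref arguments instead receive a
// pointer into that segment.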
500 bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
501     MachineIRBuilder &B, const Function &F,
502     ArrayRef<ArrayRef<Register>> VRegs) const {
503   MachineFunction &MF = B.getMF();
504   const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
505   MachineRegisterInfo &MRI = MF.getRegInfo();
506   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
507   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
508   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
509   const DataLayout &DL = F.getDataLayout();
510 
511   SmallVector<CCValAssign, 16> ArgLocs;
512   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
513 
514   allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);
515 
516   unsigned i = 0;
517   const Align KernArgBaseAlign(16);
518   const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset();
519   uint64_t ExplicitArgOffset = 0;
520 
521   // TODO: Align down to dword alignment and extract bits for extending loads.
522   for (auto &Arg : F.args()) {
523     // TODO: Add support for kernarg preload.
524     if (Arg.hasAttribute("amdgpu-hidden-argument")) {
525       LLVM_DEBUG(dbgs() << "Preloading hidden arguments is not supported\n");
526       return false;
527     }
528 
529     const bool IsByRef = Arg.hasByRefAttr();
530     Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
531     unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
532     if (AllocSize == 0)
533       continue;
534 
535     MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
536     Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);
537 
538     uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
539     ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;
540 
541     if (Arg.use_empty()) {
542       ++i;
543       continue;
544     }
545 
546     Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);
547 
548     if (IsByRef) {
549       unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();
550 
551       assert(VRegs[i].size() == 1 &&
552              "expected only one register for byval pointers");
553       if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
554         lowerParameterPtr(VRegs[i][0], B, ArgOffset);
555       } else {
556         const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
557         Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
558         lowerParameterPtr(PtrReg, B, ArgOffset);
559 
560         B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
561       }
562     } else {
563       ArgInfo OrigArg(VRegs[i], Arg, i);
564       const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
565       setArgFlags(OrigArg, OrigArgIdx, DL, F);
566       lowerParameter(B, OrigArg, ArgOffset, Alignment);
567     }
568 
569     ++i;
570   }
571 
572   TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
573   TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
574   return true;
575 }
576 
577 bool AMDGPUCallLowering::lowerFormalArguments(
578     MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
579     FunctionLoweringInfo &FLI) const {
580   CallingConv::ID CC = F.getCallingConv();
581 
582   // The infrastructure for normal calling convention lowering is essentially
583   // useless for kernels. We want to avoid any kind of legalization or argument
584   // splitting.
585   if (CC == CallingConv::AMDGPU_KERNEL)
586     return lowerFormalArgumentsKernel(B, F, VRegs);
587 
588   const bool IsGraphics = AMDGPU::isGraphics(CC);
589   const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);
590 
591   MachineFunction &MF = B.getMF();
592   MachineBasicBlock &MBB = B.getMBB();
593   MachineRegisterInfo &MRI = MF.getRegInfo();
594   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
595   const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
596   const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
597   const DataLayout &DL = F.getDataLayout();
598 
599   SmallVector<CCValAssign, 16> ArgLocs;
600   CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());
601   const GCNUserSGPRUsageInfo &UserSGPRInfo = Info->getUserSGPRInfo();
602 
603   if (UserSGPRInfo.hasImplicitBufferPtr()) {
604     Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
605     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
606     CCInfo.AllocateReg(ImplicitBufferPtrReg);
607   }
608 
609   // FIXME: This probably isn't defined for Mesa.
610   if (UserSGPRInfo.hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
611     Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
612     MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
613     CCInfo.AllocateReg(FlatScratchInitReg);
614   }
615 
616   SmallVector<ArgInfo, 32> SplitArgs;
617   unsigned Idx = 0;
618   unsigned PSInputNum = 0;
619 
620   // Insert the hidden sret parameter if the return value won't fit in the
621   // return registers.
622   if (!FLI.CanLowerReturn)
623     insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
624 
625   for (auto &Arg : F.args()) {
626     if (DL.getTypeStoreSize(Arg.getType()) == 0)
627       continue;
628 
629     const bool InReg = Arg.hasAttribute(Attribute::InReg);
630 
631     if (Arg.hasAttribute(Attribute::SwiftSelf) ||
632         Arg.hasAttribute(Attribute::SwiftError) ||
633         Arg.hasAttribute(Attribute::Nest))
634       return false;
635 
636     if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
637       const bool ArgUsed = !Arg.use_empty();
638       bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);
639 
640       if (!SkipArg) {
641         Info->markPSInputAllocated(PSInputNum);
642         if (ArgUsed)
643           Info->markPSInputEnabled(PSInputNum);
644       }
645 
646       ++PSInputNum;
647 
648       if (SkipArg) {
649         for (Register R : VRegs[Idx])
650           B.buildUndef(R);
651 
652         ++Idx;
653         continue;
654       }
655     }
656 
657     ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
658     const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
659     setArgFlags(OrigArg, OrigArgIdx, DL, F);
660 
661     splitToValueTypes(OrigArg, SplitArgs, DL, CC);
662     ++Idx;
663   }
664 
665   // At least one interpolation mode must be enabled or else the GPU will
666   // hang.
667   //
668   // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
669   // set PSInputAddr, the user wants to enable some bits after the compilation
670   // based on run-time states. Since we can't know what the final PSInputEna
671   // will look like, we shouldn't do anything here, and the user should take
672   // responsibility for the correct programming.
673   //
674   // Otherwise, the following restrictions apply:
675   // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
676   // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
677   //   enabled too.
678   if (CC == CallingConv::AMDGPU_PS) {
679     if ((Info->getPSInputAddr() & 0x7F) == 0 ||
680         ((Info->getPSInputAddr() & 0xF) == 0 &&
681          Info->isPSInputAllocated(11))) {
682       CCInfo.AllocateReg(AMDGPU::VGPR0);
683       CCInfo.AllocateReg(AMDGPU::VGPR1);
684       Info->markPSInputAllocated(0);
685       Info->markPSInputEnabled(0);
686     }
687 
688     if (Subtarget.isAmdPalOS()) {
689       // For isAmdPalOS, the user does not enable some bits after compilation
690       // based on run-time states; the register values being generated here are
691       // the final ones set in hardware. Therefore we need to apply the
692       // workaround to PSInputAddr and PSInputEnable together.  (The case where
693       // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
694       // set up an input arg for a particular interpolation mode, but nothing
695       // uses that input arg. Really we should have an earlier pass that removes
696       // such an arg.)
697       unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
698       if ((PsInputBits & 0x7F) == 0 ||
699           ((PsInputBits & 0xF) == 0 &&
700            (PsInputBits >> 11 & 1)))
701         Info->markPSInputEnabled(llvm::countr_zero(Info->getPSInputAddr()));
702     }
703   }
704 
705   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
706   CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());
707 
708   if (!MBB.empty())
709     B.setInstr(*MBB.begin());
710 
711   if (!IsEntryFunc && !IsGraphics) {
712     // For the fixed ABI, pass workitem IDs in the last argument register.
713     TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
714 
715     if (!Subtarget.enableFlatScratch())
716       CCInfo.AllocateReg(Info->getScratchRSrcReg());
717     TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
718   }
719 
720   IncomingValueAssigner Assigner(AssignFn);
721   if (!determineAssignments(Assigner, SplitArgs, CCInfo))
722     return false;
723 
724   FormalArgHandler Handler(B, MRI);
725   if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
726     return false;
727 
728   uint64_t StackSize = Assigner.StackSize;
729 
730   // Start adding system SGPRs.
731   if (IsEntryFunc)
732     TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
733 
734   // When we tail call, we need to check if the callee's arguments will fit on
735   // the caller's stack. So, whenever we lower formal arguments, we should keep
736   // track of this information, since we might lower a tail call in this
737   // function later.
738   Info->setBytesInStackArgArea(StackSize);
739 
740   // Move back to the end of the basic block.
741   B.setMBB(MBB);
742 
743   return true;
744 }
745 
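// Lower the implicit ABI inputs for a call: for each special input the callee
// still needs (dispatch pointer, queue pointer, implicit-arg pointer, dispatch
// ID, workgroup IDs, LDS kernel ID, and the packed workitem IDs), copy the
// caller's incoming value (or an undef/constant when it is unavailable) into
// the fixed register the callee expects and record it in ArgRegs.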
746 bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
747                                            CCState &CCInfo,
748                                            SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
749                                            CallLoweringInfo &Info) const {
750   MachineFunction &MF = MIRBuilder.getMF();
751 
752   // If there's no call site, this doesn't correspond to a call from the IR and
753   // doesn't need implicit inputs.
754   if (!Info.CB)
755     return true;
756 
757   const AMDGPUFunctionArgInfo *CalleeArgInfo
758     = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
759 
760   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
761   const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();
762 
763 
764   // TODO: Unify with private memory register handling. This is complicated by
765   // the fact that, at least in kernels, the caller's incoming value is not
766   // necessarily in the same location where the callee expects the input.
767   AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
768     AMDGPUFunctionArgInfo::DISPATCH_PTR,
769     AMDGPUFunctionArgInfo::QUEUE_PTR,
770     AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
771     AMDGPUFunctionArgInfo::DISPATCH_ID,
772     AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
773     AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
774     AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
775     AMDGPUFunctionArgInfo::LDS_KERNEL_ID,
776   };
777 
778   static constexpr StringLiteral ImplicitAttrNames[] = {
779     "amdgpu-no-dispatch-ptr",
780     "amdgpu-no-queue-ptr",
781     "amdgpu-no-implicitarg-ptr",
782     "amdgpu-no-dispatch-id",
783     "amdgpu-no-workgroup-id-x",
784     "amdgpu-no-workgroup-id-y",
785     "amdgpu-no-workgroup-id-z",
786     "amdgpu-no-lds-kernel-id",
787   };
788 
789   MachineRegisterInfo &MRI = MF.getRegInfo();
790 
791   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
792   const AMDGPULegalizerInfo *LI
793     = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());
794 
795   unsigned I = 0;
796   for (auto InputID : InputRegs) {
797     const ArgDescriptor *OutgoingArg;
798     const TargetRegisterClass *ArgRC;
799     LLT ArgTy;
800 
801     // If the callee does not need this input (per the call-site attribute), skip copying the value.
802     if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
803       continue;
804 
805     std::tie(OutgoingArg, ArgRC, ArgTy) =
806         CalleeArgInfo->getPreloadedValue(InputID);
807     if (!OutgoingArg)
808       continue;
809 
810     const ArgDescriptor *IncomingArg;
811     const TargetRegisterClass *IncomingArgRC;
812     std::tie(IncomingArg, IncomingArgRC, ArgTy) =
813         CallerArgInfo.getPreloadedValue(InputID);
814     assert(IncomingArgRC == ArgRC);
815 
816     Register InputReg = MRI.createGenericVirtualRegister(ArgTy);
817 
818     if (IncomingArg) {
819       LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
820     } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
821       LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
822     } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
823       std::optional<uint32_t> Id =
824           AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
825       if (Id) {
826         MIRBuilder.buildConstant(InputReg, *Id);
827       } else {
828         MIRBuilder.buildUndef(InputReg);
829       }
830     } else {
831       // We may have proven the input wasn't needed, although the ABI still
832       // requires it. We just need to allocate the register appropriately.
833       MIRBuilder.buildUndef(InputReg);
834     }
835 
836     if (OutgoingArg->isRegister()) {
837       ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
838       if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
839         report_fatal_error("failed to allocate implicit input argument");
840     } else {
841       LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
842       return false;
843     }
844   }
845 
846   // Pack the workitem IDs into a single register, or pass them through as-is
847   // if they are already packed.
848   const ArgDescriptor *OutgoingArg;
849   const TargetRegisterClass *ArgRC;
850   LLT ArgTy;
851 
852   std::tie(OutgoingArg, ArgRC, ArgTy) =
853       CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
854   if (!OutgoingArg)
855     std::tie(OutgoingArg, ArgRC, ArgTy) =
856         CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
857   if (!OutgoingArg)
858     std::tie(OutgoingArg, ArgRC, ArgTy) =
859         CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
860   if (!OutgoingArg)
861     return false;
862 
863   auto WorkitemIDX =
864       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
865   auto WorkitemIDY =
866       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
867   auto WorkitemIDZ =
868       CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
869 
870   const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
871   const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
872   const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
873   const LLT S32 = LLT::scalar(32);
874 
875   const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
876   const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
877   const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");
878 
879   // If the incoming IDs are not packed, we need to pack them.
880   // FIXME: Should consider known workgroup size to eliminate known 0 cases.
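  // Packed layout: workitem ID X occupies bits [9:0], Y bits [19:10], and Z
  // bits [29:20] of a single 32-bit value, hence the shifts by 10 and 20 below.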
881   Register InputReg;
882   if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
883       NeedWorkItemIDX) {
884     if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
885       InputReg = MRI.createGenericVirtualRegister(S32);
886       LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
887                          std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
888     } else {
889       InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
890     }
891   }
892 
893   if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
894       NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
895     Register Y = MRI.createGenericVirtualRegister(S32);
896     LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
897                        std::get<2>(WorkitemIDY));
898 
899     Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
900     InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
901   }
902 
903   if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
904       NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
905     Register Z = MRI.createGenericVirtualRegister(S32);
906     LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
907                        std::get<2>(WorkitemIDZ));
908 
909     Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
910     InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
911   }
912 
913   if (!InputReg &&
914       (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
915     InputReg = MRI.createGenericVirtualRegister(S32);
916     if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
917       // We're in a situation where the callee requires the workitem ID, but
918       // the calling function does not have it (e.g. a graphics function
919       // calling a C-calling-convention function). This is illegal, but we
920       // need to produce something.
921       MIRBuilder.buildUndef(InputReg);
922     } else {
923       // The workitem IDs are already packed; any of the present incoming
924       // arguments will carry all of the required fields.
925       ArgDescriptor IncomingArg = ArgDescriptor::createArg(
926         IncomingArgX ? *IncomingArgX :
927         IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
928       LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
929                          &AMDGPU::VGPR_32RegClass, S32);
930     }
931   }
932 
933   if (OutgoingArg->isRegister()) {
934     if (InputReg)
935       ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
936 
937     if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
938       report_fatal_error("failed to allocate implicit input argument");
939   } else {
940     LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
941     return false;
942   }
943 
944   return true;
945 }
946 
947 /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
948 /// CC.
949 static std::pair<CCAssignFn *, CCAssignFn *>
950 getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
951   return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
952 }
953 
954 static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
955                               bool IsTailCall, bool isWave32,
956                               CallingConv::ID CC) {
957   // For calls to amdgpu_cs_chain functions, the address is known to be uniform.
958   assert((AMDGPU::isChainCC(CC) || !IsIndirect || !IsTailCall) &&
959          "Indirect calls can't be tail calls, "
960          "because the address can be divergent");
961   if (!IsTailCall)
962     return AMDGPU::G_SI_CALL;
963 
964   if (AMDGPU::isChainCC(CC))
965     return isWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;
966 
967   return CC == CallingConv::AMDGPU_Gfx ? AMDGPU::SI_TCRETURN_GFX :
968                                          AMDGPU::SI_TCRETURN;
969 }
970 
971 // Add operands to call instruction to track the callee.
972 static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
973                                   MachineIRBuilder &MIRBuilder,
974                                   AMDGPUCallLowering::CallLoweringInfo &Info) {
975   if (Info.Callee.isReg()) {
976     CallInst.addReg(Info.Callee.getReg());
977     CallInst.addImm(0);
978   } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
979     // The call lowering lightly assumed we can directly encode a call target in
980     // the instruction, which is not the case. Materialize the address here.
981     const GlobalValue *GV = Info.Callee.getGlobal();
982     auto Ptr = MIRBuilder.buildGlobalValue(
983       LLT::pointer(GV->getAddressSpace(), 64), GV);
984     CallInst.addReg(Ptr.getReg(0));
985     CallInst.add(Info.Callee);
986   } else
987     return false;
988 
989   return true;
990 }
991 
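// Check that a caller and callee with different calling conventions still
// preserve the same registers and assign the call's result values compatibly,
// so that tail calling between them does not change observable behavior.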
992 bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
993     CallLoweringInfo &Info, MachineFunction &MF,
994     SmallVectorImpl<ArgInfo> &InArgs) const {
995   const Function &CallerF = MF.getFunction();
996   CallingConv::ID CalleeCC = Info.CallConv;
997   CallingConv::ID CallerCC = CallerF.getCallingConv();
998 
999   // If the calling conventions match, then everything must be the same.
1000   if (CalleeCC == CallerCC)
1001     return true;
1002 
1003   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1004 
1005   // Make sure that the caller and callee preserve all of the same registers.
1006   const auto *TRI = ST.getRegisterInfo();
1007 
1008   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1009   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1010   if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1011     return false;
1012 
1013   // Check if the caller and callee will handle arguments in the same way.
1014   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1015   CCAssignFn *CalleeAssignFnFixed;
1016   CCAssignFn *CalleeAssignFnVarArg;
1017   std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
1018       getAssignFnsForCC(CalleeCC, TLI);
1019 
1020   CCAssignFn *CallerAssignFnFixed;
1021   CCAssignFn *CallerAssignFnVarArg;
1022   std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
1023       getAssignFnsForCC(CallerCC, TLI);
1024 
1025   // FIXME: We are not accounting for potential differences in implicitly passed
1026   // inputs, but only the fixed ABI is supported now anyway.
1027   IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
1028                                        CalleeAssignFnVarArg);
1029   IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
1030                                        CallerAssignFnVarArg);
1031   return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
1032 }
1033 
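// Check that the outgoing arguments of a prospective tail call fit within the
// caller's existing incoming stack argument area and that any values already
// living in callee-saved registers are forwarded in matching registers.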
1034 bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
1035     CallLoweringInfo &Info, MachineFunction &MF,
1036     SmallVectorImpl<ArgInfo> &OutArgs) const {
1037   // If there are no outgoing arguments, then we are done.
1038   if (OutArgs.empty())
1039     return true;
1040 
1041   const Function &CallerF = MF.getFunction();
1042   CallingConv::ID CalleeCC = Info.CallConv;
1043   CallingConv::ID CallerCC = CallerF.getCallingConv();
1044   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1045 
1046   CCAssignFn *AssignFnFixed;
1047   CCAssignFn *AssignFnVarArg;
1048   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1049 
1050   // We have outgoing arguments. Make sure that we can tail call with them.
1051   SmallVector<CCValAssign, 16> OutLocs;
1052   CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
1053   OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1054 
1055   if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
1056     LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
1057     return false;
1058   }
1059 
1060   // Make sure that they can fit on the caller's stack.
1061   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1062   if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
1063     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
1064     return false;
1065   }
1066 
1067   // Verify that the parameters in callee-saved registers match.
1068   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1069   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1070   const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
1071   MachineRegisterInfo &MRI = MF.getRegInfo();
1072   return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
1073 }
1074 
1075 /// Return true if the calling convention is one that we can guarantee TCO for.
1076 static bool canGuaranteeTCO(CallingConv::ID CC) {
1077   return CC == CallingConv::Fast;
1078 }
1079 
1080 /// Return true if we might ever do TCO for calls with this calling convention.
1081 static bool mayTailCallThisCC(CallingConv::ID CC) {
1082   switch (CC) {
1083   case CallingConv::C:
1084   case CallingConv::AMDGPU_Gfx:
1085     return true;
1086   default:
1087     return canGuaranteeTCO(CC);
1088   }
1089 }
1090 
1091 bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
1092     MachineIRBuilder &B, CallLoweringInfo &Info,
1093     SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
1094   // Must pass all target-independent checks in order to tail call optimize.
1095   if (!Info.IsTailCall)
1096     return false;
1097 
1098   // Indirect calls can't be tail calls, because the address can be divergent.
1099   // TODO: Check divergence info to see whether the call target really is divergent.
1100   if (Info.Callee.isReg())
1101     return false;
1102 
1103   MachineFunction &MF = B.getMF();
1104   const Function &CallerF = MF.getFunction();
1105   CallingConv::ID CalleeCC = Info.CallConv;
1106   CallingConv::ID CallerCC = CallerF.getCallingConv();
1107 
1108   const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
1109   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1110   // Kernels aren't callable and don't have a live-in return address, so it
1111   // doesn't make sense to do a tail call from entry functions.
1112   if (!CallerPreserved)
1113     return false;
1114 
1115   if (!mayTailCallThisCC(CalleeCC)) {
1116     LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
1117     return false;
1118   }
1119 
1120   if (any_of(CallerF.args(), [](const Argument &A) {
1121         return A.hasByValAttr() || A.hasSwiftErrorAttr();
1122       })) {
1123     LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
1124                          "or swifterror arguments\n");
1125     return false;
1126   }
1127 
1128   // If we have -tailcallopt, then we're done.
1129   if (MF.getTarget().Options.GuaranteedTailCallOpt)
1130     return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();
1131 
1132   // Verify that the incoming and outgoing arguments from the callee are
1133   // safe to tail call.
1134   if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
1135     LLVM_DEBUG(
1136         dbgs()
1137         << "... Caller and callee have incompatible calling conventions.\n");
1138     return false;
1139   }
1140 
1141   // FIXME: We need to check if any arguments passed in SGPRs are uniform. If
1142   // they are not, this cannot be a tail call. If they are uniform but may end
1143   // up in VGPRs, we need to insert readfirstlanes.
1144   if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
1145     return false;
1146 
1147   LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
1148   return true;
1149 }
1150 
1151 // Insert outgoing implicit arguments for a call, by inserting copies to the
1152 // implicit argument registers and adding the necessary implicit uses to the
1153 // call instruction.
1154 void AMDGPUCallLowering::handleImplicitCallArguments(
1155     MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
1156     const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
1157     CallingConv::ID CalleeCC,
1158     ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
1159   if (!ST.enableFlatScratch()) {
1160     // Insert copies for the SRD. In the HSA case, this should be an identity
1161     // copy.
1162     auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
1163                                                FuncInfo.getScratchRSrcReg());
1164 
1165     auto CalleeRSrcReg = AMDGPU::isChainCC(CalleeCC)
1166                              ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
1167                              : AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
1168 
1169     MIRBuilder.buildCopy(CalleeRSrcReg, ScratchRSrcReg);
1170     CallInst.addReg(CalleeRSrcReg, RegState::Implicit);
1171   }
1172 
1173   for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
1174     MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
1175     CallInst.addReg(ArgReg.first, RegState::Implicit);
1176   }
1177 }
1178 
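// Emit a tail call (also used for llvm.amdgcn.cs.chain via lowerChainCall).
// Outgoing arguments are marshalled into the caller's incoming argument area
// (offset by FPDiff when -tailcallopt reserves additional space), implicit
// inputs are appended, and the appropriate tail-call pseudo instruction
// (SI_TCRETURN, SI_TCRETURN_GFX, or SI_CS_CHAIN_TC_W32/W64) is emitted.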
1179 bool AMDGPUCallLowering::lowerTailCall(
1180     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
1181     SmallVectorImpl<ArgInfo> &OutArgs) const {
1182   MachineFunction &MF = MIRBuilder.getMF();
1183   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1184   SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1185   const Function &F = MF.getFunction();
1186   MachineRegisterInfo &MRI = MF.getRegInfo();
1187   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1188 
1189   // True when we're tail calling, but without -tailcallopt.
1190   bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;
1191 
1192   // Find out which ABI gets to decide where things go.
1193   CallingConv::ID CalleeCC = Info.CallConv;
1194   CCAssignFn *AssignFnFixed;
1195   CCAssignFn *AssignFnVarArg;
1196   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1197 
1198   MachineInstrBuilder CallSeqStart;
1199   if (!IsSibCall)
1200     CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);
1201 
1202   unsigned Opc =
1203       getCallOpcode(MF, Info.Callee.isReg(), true, ST.isWave32(), CalleeCC);
1204   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1205   if (!addCallTargetOperands(MIB, MIRBuilder, Info))
1206     return false;
1207 
1208   // Byte offset for the tail call. When we are sibcalling, this will always
1209   // be 0.
1210   MIB.addImm(0);
1211 
1212   // If this is a chain call, we need to pass in the EXEC mask.
1213   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1214   if (AMDGPU::isChainCC(Info.CallConv)) {
1215     ArgInfo ExecArg = Info.OrigArgs[1];
1216     assert(ExecArg.Regs.size() == 1 && "Too many regs for EXEC");
1217 
1218     if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize()))
1219       return false;
1220 
1221     if (const auto *CI = dyn_cast<ConstantInt>(ExecArg.OrigValue)) {
1222       MIB.addImm(CI->getSExtValue());
1223     } else {
1224       MIB.addReg(ExecArg.Regs[0]);
1225       unsigned Idx = MIB->getNumOperands() - 1;
1226       MIB->getOperand(Idx).setReg(constrainOperandRegClass(
1227           MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
1228           MIB->getDesc(), MIB->getOperand(Idx), Idx));
1229     }
1230   }
1231 
1232   // Tell the call which registers are clobbered.
1233   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
1234   MIB.addRegMask(Mask);
1235 
1236   // FPDiff is the byte offset of the call's argument area from the callee's.
1237   // Stores to callee stack arguments will be placed in FixedStackSlots offset
1238   // by this amount for a tail call. In a sibling call it must be 0 because the
1239   // caller will deallocate the entire stack and the callee still expects its
1240   // arguments to begin at SP+0.
1241   int FPDiff = 0;
1242 
1243   // This will be 0 for sibcalls, potentially nonzero for tail calls produced
1244   // by -tailcallopt. For sibcalls, the memory operands for the call are
1245   // already available in the caller's incoming argument space.
1246   unsigned NumBytes = 0;
1247   if (!IsSibCall) {
1248     // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1249     // before handling assignments, because FPDiff must be known for memory
1250     // arguments.
1251     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1252     SmallVector<CCValAssign, 16> OutLocs;
1253     CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1254 
1255     // FIXME: Not accounting for callee implicit inputs
1256     OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
1257     if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1258       return false;
1259 
1260     // The callee will pop the argument stack as a tail call. Thus, we must
1261     // keep it 16-byte aligned.
1262     NumBytes = alignTo(OutInfo.getStackSize(), ST.getStackAlignment());
1263 
1264     // FPDiff will be negative if this tail call requires more space than we
1265     // would automatically have in our incoming argument space. Positive if we
1266     // actually shrink the stack.
1267     FPDiff = NumReusableBytes - NumBytes;
1268 
1269     // The stack pointer must be 16-byte aligned at all times it's used for a
1270     // memory operation, which in practice means at *all* times and in
1271     // particular across call boundaries. Therefore our own arguments started at
1272     // a 16-byte aligned SP and the delta applied for the tail call should
1273     // satisfy the same constraint.
1274     assert(isAligned(ST.getStackAlignment(), FPDiff) &&
1275            "unaligned stack on tail call");
1276   }
1277 
1278   SmallVector<CCValAssign, 16> ArgLocs;
1279   CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
1280 
1281   // We could pass MIB and directly add the implicit uses to the call
1282   // now. However, as an aesthetic choice, place implicit argument operands
1283   // after the ordinary user argument registers.
1284   SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
1285 
1286   if (Info.CallConv != CallingConv::AMDGPU_Gfx &&
1287       !AMDGPU::isChainCC(Info.CallConv)) {
1288     // With a fixed ABI, allocate fixed registers before user arguments.
1289     if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
1290       return false;
1291   }
1292 
1293   OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1294 
1295   if (!determineAssignments(Assigner, OutArgs, CCInfo))
1296     return false;
1297 
1298   // Do the actual argument marshalling.
1299   AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
1300   if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
1301     return false;
1302 
1303   if (Info.ConvergenceCtrlToken) {
1304     MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1305   }
1306   handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, CalleeCC,
1307                               ImplicitArgRegs);
1308 
1309   // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1310   // sequence start and end here.
1311   if (!IsSibCall) {
1312     MIB->getOperand(1).setImm(FPDiff);
1313     CallSeqStart.addImm(NumBytes).addImm(0);
1314     // End the call sequence *before* emitting the call. Normally, we would
1315     // tidy the frame up after the call. However, here, we've laid out the
1316     // parameters so that when SP is reset, they will be in the correct
1317     // location.
1318     MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
1319   }
1320 
1321   // Now we can add the actual call instruction to the correct basic block.
1322   MIRBuilder.insertInstr(MIB);
1323 
1324   // If Callee is a register, it is used by a target-specific instruction and
1325   // therefore must have a register class matching the constraint of that
1326   // instruction.
1327 
1328   // FIXME: We should define regbankselectable call instructions to handle
1329   // divergent call targets.
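       // For the tail call pseudo, the callee is operand 0.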
1330   if (MIB->getOperand(0).isReg()) {
1331     MIB->getOperand(0).setReg(constrainOperandRegClass(
1332         MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
1333         MIB->getDesc(), MIB->getOperand(0), 0));
1334   }
1335 
1336   MF.getFrameInfo().setHasTailCall();
1337   Info.LoweredTailCall = true;
1338   return true;
1339 }
1340 
1341 /// Lower a call to the @llvm.amdgcn.cs.chain intrinsic.
1342 bool AMDGPUCallLowering::lowerChainCall(MachineIRBuilder &MIRBuilder,
1343                                         CallLoweringInfo &Info) const {
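       // Operands of @llvm.amdgcn.cs.chain: 0 = callee, 1 = EXEC mask (not used
       // in this lowering), 2 = SGPR args, 3 = VGPR args, 4 = flags.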
1344   ArgInfo Callee = Info.OrigArgs[0];
1345   ArgInfo SGPRArgs = Info.OrigArgs[2];
1346   ArgInfo VGPRArgs = Info.OrigArgs[3];
1347   ArgInfo Flags = Info.OrigArgs[4];
1348 
1349   assert(cast<ConstantInt>(Flags.OrigValue)->isZero() &&
1350          "Non-zero flags aren't supported yet.");
1351   assert(Info.OrigArgs.size() == 5 && "Additional args aren't supported yet.");
1352 
1353   MachineFunction &MF = MIRBuilder.getMF();
1354   const Function &F = MF.getFunction();
1355   const DataLayout &DL = F.getDataLayout();
1356 
1357   // The function to jump to is actually the first argument, so update the
1358   // callee and calling convention in Info before reusing the tail-call path.
1359   const Value *CalleeV = Callee.OrigValue->stripPointerCasts();
1360   if (const Function *F = dyn_cast<Function>(CalleeV)) {
1361     Info.Callee = MachineOperand::CreateGA(F, 0);
1362     Info.CallConv = F->getCallingConv();
1363   } else {
1364     assert(Callee.Regs.size() == 1 && "Too many regs for the callee");
1365     Info.Callee = MachineOperand::CreateReg(Callee.Regs[0], false);
1366     Info.CallConv = CallingConv::AMDGPU_CS_Chain; // amdgpu_cs_chain_preserve
1367                                                   // behaves the same here.
1368   }
1369 
1370   // The function that we're calling cannot be vararg (only the intrinsic is).
1371   Info.IsVarArg = false;
1372 
1373   assert(
1374       all_of(SGPRArgs.Flags, [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
1375       "SGPR arguments should be marked inreg");
1376   assert(
1377       none_of(VGPRArgs.Flags, [](ISD::ArgFlagsTy F) { return F.isInReg(); }) &&
1378       "VGPR arguments should not be marked inreg");
1379 
1380   SmallVector<ArgInfo, 8> OutArgs;
1381   splitToValueTypes(SGPRArgs, OutArgs, DL, Info.CallConv);
1382   splitToValueTypes(VGPRArgs, OutArgs, DL, Info.CallConv);
1383 
1384   Info.IsMustTailCall = true;
1385   return lowerTailCall(MIRBuilder, Info, OutArgs);
1386 }
1387 
1388 bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1389                                    CallLoweringInfo &Info) const {
1390   if (Function *F = Info.CB->getCalledFunction())
1391     if (F->isIntrinsic()) {
1392       assert(F->getIntrinsicID() == Intrinsic::amdgcn_cs_chain &&
1393              "Unexpected intrinsic");
1394       return lowerChainCall(MIRBuilder, Info);
1395     }
1396 
1397   if (Info.IsVarArg) {
1398     LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
1399     return false;
1400   }
1401 
1402   MachineFunction &MF = MIRBuilder.getMF();
1403   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1404   const SIRegisterInfo *TRI = ST.getRegisterInfo();
1405 
1406   const Function &F = MF.getFunction();
1407   MachineRegisterInfo &MRI = MF.getRegInfo();
1408   const SITargetLowering &TLI = *getTLI<SITargetLowering>();
1409   const DataLayout &DL = F.getDataLayout();
1410 
1411   SmallVector<ArgInfo, 8> OutArgs;
1412   for (auto &OrigArg : Info.OrigArgs)
1413     splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
1414 
1415   SmallVector<ArgInfo, 8> InArgs;
1416   if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
1417     splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
1418 
1419   // If we can lower as a tail call, do that instead.
1420   bool CanTailCallOpt =
1421       isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
1422 
1423   // We must emit a tail call if we have musttail.
1424   if (Info.IsMustTailCall && !CanTailCallOpt) {
1425     LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
1426     return false;
1427   }
1428 
1429   Info.IsTailCall = CanTailCallOpt;
1430   if (CanTailCallOpt)
1431     return lowerTailCall(MIRBuilder, Info, OutArgs);
1432 
1433   // Find out which ABI gets to decide where things go.
1434   CCAssignFn *AssignFnFixed;
1435   CCAssignFn *AssignFnVarArg;
1436   std::tie(AssignFnFixed, AssignFnVarArg) =
1437       getAssignFnsForCC(Info.CallConv, TLI);
1438 
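       // Begin the call sequence before lowering any outgoing arguments.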
1439   MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
1440     .addImm(0)
1441     .addImm(0);
1442 
1443   // Create a temporarily-floating call instruction so we can add the implicit
1444   // uses of arg registers.
1445   unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false, ST.isWave32(),
1446                                Info.CallConv);
1447 
1448   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1449   MIB.addDef(TRI->getReturnAddressReg(MF));
1450 
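       // The call opcode is marked convergent; clear that property when the IR
       // call site is not convergent.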
1451   if (!Info.IsConvergent)
1452     MIB.setMIFlag(MachineInstr::NoConvergent);
1453 
1454   if (!addCallTargetOperands(MIB, MIRBuilder, Info))
1455     return false;
1456 
1457   // Tell the call which registers are clobbered.
1458   const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
1459   MIB.addRegMask(Mask);
1460 
1461   SmallVector<CCValAssign, 16> ArgLocs;
1462   CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
1463 
1464   // We could pass MIB and directly add the implicit uses to the call
1465   // now. However, as an aesthetic choice, place implicit argument operands
1466   // after the ordinary user argument registers.
1467   SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
1468 
1469   if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
1470     // With a fixed ABI, allocate fixed registers before user arguments.
1471     if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
1472       return false;
1473   }
1474 
1475   // Do the actual argument marshalling.
1476   SmallVector<Register, 8> PhysRegs;
1477 
1478   OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
1479   if (!determineAssignments(Assigner, OutArgs, CCInfo))
1480     return false;
1481 
1482   AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
1483   if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
1484     return false;
1485 
1486   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1487 
1488   if (Info.ConvergenceCtrlToken) {
1489     MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1490   }
1491   handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
1492                               ImplicitArgRegs);
1493 
1494   // Get a count of how many bytes are to be pushed on the stack.
1495   unsigned NumBytes = CCInfo.getStackSize();
1496 
1497   // If the callee is a register, it is used by a target-specific instruction
1498   // and must therefore have a register class matching that instruction's
1499   // operand constraint.
1500 
1501   // FIXME: We should define regbankselectable call instructions to handle
1502   // divergent call targets.
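       // Operand 0 is the return address def added above, so the callee is
       // operand 1.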
1503   if (MIB->getOperand(1).isReg()) {
1504     MIB->getOperand(1).setReg(constrainOperandRegClass(
1505         MF, *TRI, MRI, *ST.getInstrInfo(),
1506         *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
1507         1));
1508   }
1509 
1510   // Now we can add the actual call instruction to the correct position.
1511   MIRBuilder.insertInstr(MIB);
1512 
1513   // Finally we can copy the returned value back into its virtual register. In
1514   // symmetry with the arguments, the physical register must be an implicit
1515   // def of the call instruction.
1516   if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
1517     CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
1518                                                       Info.IsVarArg);
1519     IncomingValueAssigner Assigner(RetAssignFn);
1520     CallReturnHandler Handler(MIRBuilder, MRI, MIB);
1521     if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
1522                                        Info.CallConv, Info.IsVarArg))
1523       return false;
1524   }
1525 
1526   uint64_t CalleePopBytes = NumBytes;
1527 
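       // End the call sequence, recording how many bytes of stack were used for
       // arguments.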
1528   MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
1529             .addImm(0)
1530             .addImm(CalleePopBytes);
1531 
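       // The return value was demoted to memory (sret); load it back from the
       // demoted stack slot.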
1532   if (!Info.CanLowerReturn) {
1533     insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
1534                     Info.DemoteRegister, Info.DemoteStackIndex);
1535   }
1536 
1537   return true;
1538 }
1539