xref: /llvm-project/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp (revision f4504083634ee73cfbb4593613a8f11487207ce6)
1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
15 #include "llvm/CodeGen/Analysis.h"
16 #include "llvm/CodeGen/CallingConvLower.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/GlobalISel/Utils.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineOperand.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/LLVMContext.h"
26 #include "llvm/IR/Module.h"
27 #include "llvm/Target/TargetMachine.h"
28 
29 #define DEBUG_TYPE "call-lowering"
30 
31 using namespace llvm;
32 
33 void CallLowering::anchor() {}
34 
35 /// Helper function which updates \p Flags when \p AttrFn returns true.
36 static void
37 addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
38                     const std::function<bool(Attribute::AttrKind)> &AttrFn) {
39   // TODO: There are missing flags. Add them here.
40   if (AttrFn(Attribute::SExt))
41     Flags.setSExt();
42   if (AttrFn(Attribute::ZExt))
43     Flags.setZExt();
44   if (AttrFn(Attribute::InReg))
45     Flags.setInReg();
46   if (AttrFn(Attribute::StructRet))
47     Flags.setSRet();
48   if (AttrFn(Attribute::Nest))
49     Flags.setNest();
50   if (AttrFn(Attribute::ByVal))
51     Flags.setByVal();
52   if (AttrFn(Attribute::Preallocated))
53     Flags.setPreallocated();
54   if (AttrFn(Attribute::InAlloca))
55     Flags.setInAlloca();
56   if (AttrFn(Attribute::Returned))
57     Flags.setReturned();
58   if (AttrFn(Attribute::SwiftSelf))
59     Flags.setSwiftSelf();
60   if (AttrFn(Attribute::SwiftAsync))
61     Flags.setSwiftAsync();
62   if (AttrFn(Attribute::SwiftError))
63     Flags.setSwiftError();
64 }
65 
66 ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
67                                                      unsigned ArgIdx) const {
68   ISD::ArgFlagsTy Flags;
69   addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
70     return Call.paramHasAttr(ArgIdx, Attr);
71   });
72   return Flags;
73 }
74 
75 ISD::ArgFlagsTy
76 CallLowering::getAttributesForReturn(const CallBase &Call) const {
77   ISD::ArgFlagsTy Flags;
78   addFlagsUsingAttrFn(Flags, [&Call](Attribute::AttrKind Attr) {
79     return Call.hasRetAttr(Attr);
80   });
81   return Flags;
82 }
83 
84 void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
85                                              const AttributeList &Attrs,
86                                              unsigned OpIdx) const {
87   addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
88     return Attrs.hasAttributeAtIndex(OpIdx, Attr);
89   });
90 }
91 
92 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
93                              ArrayRef<Register> ResRegs,
94                              ArrayRef<ArrayRef<Register>> ArgRegs,
95                              Register SwiftErrorVReg,
96                              std::optional<PtrAuthInfo> PAI,
97                              Register ConvergenceCtrlToken,
98                              std::function<unsigned()> GetCalleeReg) const {
99   CallLoweringInfo Info;
100   const DataLayout &DL = MIRBuilder.getDataLayout();
101   MachineFunction &MF = MIRBuilder.getMF();
102   MachineRegisterInfo &MRI = MF.getRegInfo();
103   bool CanBeTailCalled = CB.isTailCall() &&
104                          isInTailCallPosition(CB, MF.getTarget()) &&
105                          (MF.getFunction()
106                               .getFnAttribute("disable-tail-calls")
107                               .getValueAsString() != "true");
108 
109   CallingConv::ID CallConv = CB.getCallingConv();
110   Type *RetTy = CB.getType();
111   bool IsVarArg = CB.getFunctionType()->isVarArg();
112 
113   SmallVector<BaseArgInfo, 4> SplitArgs;
114   getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
115   Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
116 
117   Info.IsConvergent = CB.isConvergent();
118 
119   if (!Info.CanLowerReturn) {
120     // Callee requires sret demotion.
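    // E.g. (illustrative): a call returning a struct the target cannot return
    // in registers, such as { i64, i64, i64 }, instead receives a hidden
    // pointer to caller-allocated stack space:
    //   %slot:_(p0) = G_FRAME_INDEX %stack.0
    // The pointer is passed as an sret argument and the results are loaded
    // back from the slot after the call.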
121     insertSRetOutgoingArgument(MIRBuilder, CB, Info);
122 
123     // The sret demotion isn't compatible with tail-calls, since the sret
124     // argument points into the caller's stack frame.
125     CanBeTailCalled = false;
126   }
127 
128   // First step is to marshal all the function's parameters into the correct
129   // physregs and memory locations. Gather the sequence of argument types that
130   // we'll pass to the assigner function.
131   unsigned i = 0;
132   unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
133   for (const auto &Arg : CB.args()) {
134     ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
135                     i < NumFixedArgs};
136     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
137 
138     // If we have an explicit sret argument that is an Instruction (i.e., it
139     // might point to function-local memory), we can't meaningfully tail-call.
140     if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
141       CanBeTailCalled = false;
142 
143     Info.OrigArgs.push_back(OrigArg);
144     ++i;
145   }
146 
147   // Try looking through a bitcast from one function type to another.
148   // Commonly happens with calls to objc_msgSend().
149   const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
150   if (const Function *F = dyn_cast<Function>(CalleeV)) {
151     if (F->hasFnAttribute(Attribute::NonLazyBind)) {
152       LLT Ty = getLLTForType(*F->getType(), DL);
153       Register Reg = MIRBuilder.buildGlobalValue(Ty, F).getReg(0);
154       Info.Callee = MachineOperand::CreateReg(Reg, false);
155     } else {
156       Info.Callee = MachineOperand::CreateGA(F, 0);
157     }
158   } else if (isa<GlobalIFunc>(CalleeV) || isa<GlobalAlias>(CalleeV)) {
159     // IR IFuncs and Aliases can't be forward declared (only defined), so the
160     // callee must be in the same TU and therefore we can direct-call it without
161     // worrying about it being out of range.
162     Info.Callee = MachineOperand::CreateGA(cast<GlobalValue>(CalleeV), 0);
163   } else
164     Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
165 
166   Register ReturnHintAlignReg;
167   Align ReturnHintAlign;
168 
169   Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, getAttributesForReturn(CB)};
170 
171   if (!Info.OrigRet.Ty->isVoidTy()) {
172     setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
173 
174     if (MaybeAlign Alignment = CB.getRetAlign()) {
175       if (*Alignment > Align(1)) {
176         ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
177         Info.OrigRet.Regs[0] = ReturnHintAlignReg;
178         ReturnHintAlign = *Alignment;
179       }
180     }
181   }
182 
183   auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi);
184   if (Bundle && CB.isIndirectCall()) {
185     Info.CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
186     assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
187   }
188 
189   Info.CB = &CB;
190   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
191   Info.CallConv = CallConv;
192   Info.SwiftErrorVReg = SwiftErrorVReg;
193   Info.PAI = PAI;
194   Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
195   Info.IsMustTailCall = CB.isMustTailCall();
196   Info.IsTailCall = CanBeTailCalled;
197   Info.IsVarArg = IsVarArg;
198   if (!lowerCall(MIRBuilder, Info))
199     return false;
200 
201   if (ReturnHintAlignReg && !Info.LoweredTailCall) {
202     MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
203                                 ReturnHintAlign);
204   }
205 
206   return true;
207 }
208 
209 template <typename FuncInfoTy>
210 void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
211                                const DataLayout &DL,
212                                const FuncInfoTy &FuncInfo) const {
213   auto &Flags = Arg.Flags[0];
214   const AttributeList &Attrs = FuncInfo.getAttributes();
215   addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
216 
217   PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
218   if (PtrTy) {
219     Flags.setPointer();
220     Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
221   }
222 
223   Align MemAlign = DL.getABITypeAlign(Arg.Ty);
224   if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
225     assert(OpIdx >= AttributeList::FirstArgIndex);
226     unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
227 
228     Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
229     if (!ElementTy)
230       ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
231     if (!ElementTy)
232       ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
233     assert(ElementTy && "Must have byval, inalloca or preallocated type");
234     Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
235 
236     // For ByVal, the alignment should come from the frontend. The backend will
237     // guess if this info is not there, but there are cases it cannot get right.
238     if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
239       MemAlign = *ParamAlign;
240     else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
241       MemAlign = *ParamAlign;
242     else
243       MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
244   } else if (OpIdx >= AttributeList::FirstArgIndex) {
245     if (auto ParamAlign =
246             FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
247       MemAlign = *ParamAlign;
248   }
249   Flags.setMemAlign(MemAlign);
250   Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
251 
252   // Don't try to use the returned attribute if the argument is marked as
253   // swiftself, since it won't be passed in x0.
254   if (Flags.isSwiftSelf())
255     Flags.setReturned(false);
256 }
257 
258 template void
259 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
260                                     const DataLayout &DL,
261                                     const Function &FuncInfo) const;
262 
263 template void
264 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
265                                     const DataLayout &DL,
266                                     const CallBase &FuncInfo) const;
267 
268 void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
269                                      SmallVectorImpl<ArgInfo> &SplitArgs,
270                                      const DataLayout &DL,
271                                      CallingConv::ID CallConv,
272                                      SmallVectorImpl<uint64_t> *Offsets) const {
273   LLVMContext &Ctx = OrigArg.Ty->getContext();
274 
275   SmallVector<EVT, 4> SplitVTs;
276   ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
277 
278   if (SplitVTs.size() == 0)
279     return;
280 
281   if (SplitVTs.size() == 1) {
282     // No splitting to do, but we want to replace the original type (e.g. [1 x
283     // double] -> double).
284     SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
285                            OrigArg.OrigArgIndex, OrigArg.Flags[0],
286                            OrigArg.IsFixed, OrigArg.OrigValue);
287     return;
288   }
289 
290   // Create one ArgInfo for each virtual register in the original ArgInfo.
291   assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
292 
293   bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
294       OrigArg.Ty, CallConv, false, DL);
295   for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
296     Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
297     SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
298                            OrigArg.Flags[0], OrigArg.IsFixed);
299     if (NeedsRegBlock)
300       SplitArgs.back().Flags[0].setInConsecutiveRegs();
301   }
302 
303   SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
304 }
305 
306 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
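///
/// A sketch of the common case (vreg names are illustrative):
///   DstRegs = { %d:_(<4 x s16>) }, SrcRegs = { %a:_(<2 x s16>), %b:_(<2 x s16>) }
///     -> %d:_(<4 x s16>) = G_CONCAT_VECTORS %a:_(<2 x s16>), %b:_(<2 x s16>)
/// Odd-sized results such as <3 x s16> are first built as the covering type
/// (<4 x s16> here) and then have the trailing element dropped.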
307 static MachineInstrBuilder
308 mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
309                             ArrayRef<Register> SrcRegs) {
310   MachineRegisterInfo &MRI = *B.getMRI();
311   LLT LLTy = MRI.getType(DstRegs[0]);
312   LLT PartLLT = MRI.getType(SrcRegs[0]);
313 
314   // Deal with v3s16 split into v2s16
315   LLT LCMTy = getCoverTy(LLTy, PartLLT);
316   if (LCMTy == LLTy) {
317     // Common case where no padding is needed.
318     assert(DstRegs.size() == 1);
319     return B.buildConcatVectors(DstRegs[0], SrcRegs);
320   }
321 
322   // We need to create an unmerge to the result registers, which may require
323   // widening the original value.
324   Register UnmergeSrcReg;
325   if (LCMTy != PartLLT) {
326     assert(DstRegs.size() == 1);
327     return B.buildDeleteTrailingVectorElements(
328         DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
329   } else {
330     // We don't need to widen anything if we're extracting a scalar which was
331     // promoted to a vector e.g. s8 -> v4s8 -> s8
332     assert(SrcRegs.size() == 1);
333     UnmergeSrcReg = SrcRegs[0];
334   }
335 
336   int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
337 
338   SmallVector<Register, 8> PadDstRegs(NumDst);
339   std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
340 
341   // Create the excess dead defs for the unmerge.
342   for (int I = DstRegs.size(); I != NumDst; ++I)
343     PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
344 
345   if (PadDstRegs.size() == 1)
346     return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
347   return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
348 }
349 
350 /// Create a sequence of instructions to combine pieces split into register
351 /// typed values to the original IR value. \p OrigRegs contains the destination
352 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
353 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
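///
/// For example (illustrative vregs), an s64 IR value passed as two s32 parts
/// is reassembled with:
///   %val:_(s64) = G_MERGE_VALUES %part0:_(s32), %part1:_(s32)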
354 static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
355                               ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
356                               const ISD::ArgFlagsTy Flags) {
357   MachineRegisterInfo &MRI = *B.getMRI();
358 
359   if (PartLLT == LLTy) {
360     // We should have avoided introducing a new virtual register, and just
361     // directly assigned here.
362     assert(OrigRegs[0] == Regs[0]);
363     return;
364   }
365 
366   if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
367       Regs.size() == 1) {
368     B.buildBitcast(OrigRegs[0], Regs[0]);
369     return;
370   }
371 
372   // PartLLT's elements are wider than LLTy's, i.e. the value was promoted,
373   // e.g. <2 x s64> = G_SEXT <2 x s32>, and needs truncating back down.
374   if (PartLLT.isVector() == LLTy.isVector() &&
375       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
376       (!PartLLT.isVector() ||
377        PartLLT.getElementCount() == LLTy.getElementCount()) &&
378       OrigRegs.size() == 1 && Regs.size() == 1) {
379     Register SrcReg = Regs[0];
380 
381     LLT LocTy = MRI.getType(SrcReg);
382 
383     if (Flags.isSExt()) {
384       SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
385                    .getReg(0);
386     } else if (Flags.isZExt()) {
387       SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
388                    .getReg(0);
389     }
390 
391     // Sometimes pointers are passed zero extended.
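    // E.g. (illustrative) for a 32-bit pointer passed in a 64-bit location:
    //   %t:_(s32) = G_TRUNC %loc:_(s64)
    //   %p:_(p0) = G_INTTOPTR %t:_(s32)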
392     LLT OrigTy = MRI.getType(OrigRegs[0]);
393     if (OrigTy.isPointer()) {
394       LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
395       B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
396       return;
397     }
398 
399     B.buildTrunc(OrigRegs[0], SrcReg);
400     return;
401   }
402 
403   if (!LLTy.isVector() && !PartLLT.isVector()) {
404     assert(OrigRegs.size() == 1);
405     LLT OrigTy = MRI.getType(OrigRegs[0]);
406 
407     unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
408     if (SrcSize == OrigTy.getSizeInBits())
409       B.buildMergeValues(OrigRegs[0], Regs);
410     else {
411       auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
412       B.buildTrunc(OrigRegs[0], Widened);
413     }
414 
415     return;
416   }
417 
418   if (PartLLT.isVector()) {
419     assert(OrigRegs.size() == 1);
420     SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
421 
422     // If PartLLT is a mismatched vector in both number of elements and element
423     // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
424     // have the same elt type, i.e. v4s32.
425     // TODO: Extend this coercion to element multiples other than just 2.
426     if (TypeSize::isKnownGT(PartLLT.getSizeInBits(), LLTy.getSizeInBits()) &&
427         PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
428         Regs.size() == 1) {
429       LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
430                       .changeElementCount(PartLLT.getElementCount() * 2);
431       CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
432       PartLLT = NewTy;
433     }
434 
435     if (LLTy.getScalarType() == PartLLT.getElementType()) {
436       mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
437     } else {
438       unsigned I = 0;
439       LLT GCDTy = getGCDType(LLTy, PartLLT);
440 
441       // We are both splitting a vector, and bitcasting its element types. Cast
442       // the source pieces into the appropriate number of pieces with the result
443       // element type.
444       for (Register SrcReg : CastRegs)
445         CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
446       mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
447     }
448 
449     return;
450   }
451 
452   assert(LLTy.isVector() && !PartLLT.isVector());
453 
454   LLT DstEltTy = LLTy.getElementType();
455 
456   // Pointer information was discarded. We'll need to coerce some register types
457   // to avoid violating type constraints.
458   LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
459 
460   assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
461 
462   if (DstEltTy == PartLLT) {
463     // Vector was trivially scalarized.
464 
465     if (RealDstEltTy.isPointer()) {
466       for (Register Reg : Regs)
467         MRI.setType(Reg, RealDstEltTy);
468     }
469 
470     B.buildBuildVector(OrigRegs[0], Regs);
471   } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
472     // Deal with vector with 64-bit elements decomposed to 32-bit
473     // registers. Need to create intermediate 64-bit elements.
474     SmallVector<Register, 8> EltMerges;
475     int PartsPerElt =
476         divideCeil(DstEltTy.getSizeInBits(), PartLLT.getSizeInBits());
477     LLT ExtendedPartTy = LLT::scalar(PartLLT.getSizeInBits() * PartsPerElt);
478 
479     for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
480       auto Merge =
481           B.buildMergeLikeInstr(ExtendedPartTy, Regs.take_front(PartsPerElt));
482       if (ExtendedPartTy.getSizeInBits() > RealDstEltTy.getSizeInBits())
483         Merge = B.buildTrunc(RealDstEltTy, Merge);
484       // Fix the type in case this is really a vector of pointers.
485       MRI.setType(Merge.getReg(0), RealDstEltTy);
486       EltMerges.push_back(Merge.getReg(0));
487       Regs = Regs.drop_front(PartsPerElt);
488     }
489 
490     B.buildBuildVector(OrigRegs[0], EltMerges);
491   } else {
492     // Vector was split, and elements promoted to a wider type.
493     // FIXME: Should handle floating point promotions.
494     unsigned NumElts = LLTy.getNumElements();
495     LLT BVType = LLT::fixed_vector(NumElts, PartLLT);
496 
497     Register BuildVec;
498     if (NumElts == Regs.size())
499       BuildVec = B.buildBuildVector(BVType, Regs).getReg(0);
500     else {
501       // Vector elements are packed in the inputs.
502       // e.g. we have a <4 x s16> but 2 x s32 in regs.
503       assert(NumElts > Regs.size());
504       LLT SrcEltTy = MRI.getType(Regs[0]);
505 
506       LLT OriginalEltTy = MRI.getType(OrigRegs[0]).getElementType();
507 
508       // Input registers contain packed elements.
509       // Determine how many elements per reg.
510       assert((SrcEltTy.getSizeInBits() % OriginalEltTy.getSizeInBits()) == 0);
511       unsigned EltPerReg =
512           (SrcEltTy.getSizeInBits() / OriginalEltTy.getSizeInBits());
513 
514       SmallVector<Register, 0> BVRegs;
515       BVRegs.reserve(Regs.size() * EltPerReg);
516       for (Register R : Regs) {
517         auto Unmerge = B.buildUnmerge(OriginalEltTy, R);
518         for (unsigned K = 0; K < EltPerReg; ++K)
519           BVRegs.push_back(B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0));
520       }
521 
522       // We may have some more elements in BVRegs, e.g. if we have 2 s32 pieces
523       // for a <3 x s16> vector. We should have fewer than EltPerReg extra items.
524       if (BVRegs.size() > NumElts) {
525         assert((BVRegs.size() - NumElts) < EltPerReg);
526         BVRegs.truncate(NumElts);
527       }
528       BuildVec = B.buildBuildVector(BVType, BVRegs).getReg(0);
529     }
530     B.buildTrunc(OrigRegs[0], BuildVec);
531   }
532 }
533 
534 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
535 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
536 /// contain the type of scalar value extension if necessary.
537 ///
538 /// This is used for outgoing values (vregs to physregs)
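///
/// For example (illustrative vregs), an outgoing s64 split into two s32 parts
/// is a plain unmerge, since the GCD type equals the part type:
///   %part0:_(s32), %part1:_(s32) = G_UNMERGE_VALUES %val:_(s64)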
539 static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
540                             Register SrcReg, LLT SrcTy, LLT PartTy,
541                             unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
542   // We could just insert a regular copy, but this is unreachable at the moment.
543   assert(SrcTy != PartTy && "identical part types shouldn't reach here");
544 
545   const TypeSize PartSize = PartTy.getSizeInBits();
546 
547   if (PartTy.isVector() == SrcTy.isVector() &&
548       PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
549     assert(DstRegs.size() == 1);
550     B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
551     return;
552   }
553 
554   if (SrcTy.isVector() && !PartTy.isVector() &&
555       TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits())) {
556     // Vector was scalarized, and the elements extended.
557     auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
558     for (int i = 0, e = DstRegs.size(); i != e; ++i)
559       B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
560     return;
561   }
562 
563   if (SrcTy.isVector() && PartTy.isVector() &&
564       PartTy.getSizeInBits() == SrcTy.getSizeInBits() &&
565       ElementCount::isKnownLT(SrcTy.getElementCount(),
566                               PartTy.getElementCount())) {
567     // A coercion like: v2f32 -> v4f32 or nxv2f32 -> nxv4f32
568     Register DstReg = DstRegs.front();
569     B.buildPadVectorWithUndefElements(DstReg, SrcReg);
570     return;
571   }
572 
573   LLT GCDTy = getGCDType(SrcTy, PartTy);
574   if (GCDTy == PartTy) {
575     // If this is already evenly divisible, we can create a simple unmerge.
576     B.buildUnmerge(DstRegs, SrcReg);
577     return;
578   }
579 
580   if (SrcTy.isVector() && !PartTy.isVector() &&
581       SrcTy.getScalarSizeInBits() > PartTy.getSizeInBits()) {
582     LLT ExtTy =
583         LLT::vector(SrcTy.getElementCount(),
584                     LLT::scalar(PartTy.getScalarSizeInBits() * DstRegs.size() /
585                                 SrcTy.getNumElements()));
586     auto Ext = B.buildAnyExt(ExtTy, SrcReg);
587     B.buildUnmerge(DstRegs, Ext);
588     return;
589   }
590 
591   MachineRegisterInfo &MRI = *B.getMRI();
592   LLT DstTy = MRI.getType(DstRegs[0]);
593   LLT LCMTy = getCoverTy(SrcTy, PartTy);
594 
595   if (PartTy.isVector() && LCMTy == PartTy) {
596     assert(DstRegs.size() == 1);
597     B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
598     return;
599   }
600 
601   const unsigned DstSize = DstTy.getSizeInBits();
602   const unsigned SrcSize = SrcTy.getSizeInBits();
603   unsigned CoveringSize = LCMTy.getSizeInBits();
604 
605   Register UnmergeSrc = SrcReg;
606 
607   if (!LCMTy.isVector() && CoveringSize != SrcSize) {
608     // For scalars, it's common to be able to use a simple extension.
609     if (SrcTy.isScalar() && DstTy.isScalar()) {
610       CoveringSize = alignTo(SrcSize, DstSize);
611       LLT CoverTy = LLT::scalar(CoveringSize);
612       UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
613     } else {
614       // Widen to the common type.
615       // FIXME: This should respect the extend type
616       Register Undef = B.buildUndef(SrcTy).getReg(0);
617       SmallVector<Register, 8> MergeParts(1, SrcReg);
618       for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
619         MergeParts.push_back(Undef);
620       UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
621     }
622   }
623 
624   if (LCMTy.isVector() && CoveringSize != SrcSize)
625     UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);
626 
627   B.buildUnmerge(DstRegs, UnmergeSrc);
628 }
629 
630 bool CallLowering::determineAndHandleAssignments(
631     ValueHandler &Handler, ValueAssigner &Assigner,
632     SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
633     CallingConv::ID CallConv, bool IsVarArg,
634     ArrayRef<Register> ThisReturnRegs) const {
635   MachineFunction &MF = MIRBuilder.getMF();
636   const Function &F = MF.getFunction();
637   SmallVector<CCValAssign, 16> ArgLocs;
638 
639   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
640   if (!determineAssignments(Assigner, Args, CCInfo))
641     return false;
642 
643   return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
644                            ThisReturnRegs);
645 }
646 
647 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
648   if (Flags.isSExt())
649     return TargetOpcode::G_SEXT;
650   if (Flags.isZExt())
651     return TargetOpcode::G_ZEXT;
652   return TargetOpcode::G_ANYEXT;
653 }
654 
655 bool CallLowering::determineAssignments(ValueAssigner &Assigner,
656                                         SmallVectorImpl<ArgInfo> &Args,
657                                         CCState &CCInfo) const {
658   LLVMContext &Ctx = CCInfo.getContext();
659   const CallingConv::ID CallConv = CCInfo.getCallingConv();
660 
661   unsigned NumArgs = Args.size();
662   for (unsigned i = 0; i != NumArgs; ++i) {
663     EVT CurVT = EVT::getEVT(Args[i].Ty);
664 
665     MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
666 
667     // If we need to split the type over multiple regs, check it's a scenario
668     // we currently support.
669     unsigned NumParts =
670         TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
671 
672     if (NumParts == 1) {
673       // Try to use the register type if we couldn't assign the VT.
674       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
675                              Args[i].Flags[0], CCInfo))
676         return false;
677       continue;
678     }
679 
680     // For incoming arguments (physregs to vregs), we could have values in
681     // physregs (or memlocs) which we want to extract and copy to vregs.
682     // During this, we might have to deal with the LLT being split across
683     // multiple regs, so we have to record this information for later.
684     //
685     // If we have outgoing args, then we have the opposite case. We have a
686     // vreg with an LLT which we want to assign to a physical location, and
687     // we might have to record that the value has to be split later.
688 
689     // We're handling an incoming arg which is split over multiple regs.
690     // E.g. passing an s128 on AArch64.
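    // As a sketch (assuming an s128 split into two s64 parts), the per-part
    // flags end up as:
    //   Part 0: original flags plus Split
    //   Part 1: original flags with OrigAlign = 1, plus SplitEnd (last part)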
691     ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
692     Args[i].Flags.clear();
693 
694     for (unsigned Part = 0; Part < NumParts; ++Part) {
695       ISD::ArgFlagsTy Flags = OrigFlags;
696       if (Part == 0) {
697         Flags.setSplit();
698       } else {
699         Flags.setOrigAlign(Align(1));
700         if (Part == NumParts - 1)
701           Flags.setSplitEnd();
702       }
703 
704       Args[i].Flags.push_back(Flags);
705       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
706                              Args[i].Flags[Part], CCInfo)) {
707         // Still couldn't assign this smaller part type for some reason.
708         return false;
709       }
710     }
711   }
712 
713   return true;
714 }
715 
716 bool CallLowering::handleAssignments(ValueHandler &Handler,
717                                      SmallVectorImpl<ArgInfo> &Args,
718                                      CCState &CCInfo,
719                                      SmallVectorImpl<CCValAssign> &ArgLocs,
720                                      MachineIRBuilder &MIRBuilder,
721                                      ArrayRef<Register> ThisReturnRegs) const {
722   MachineFunction &MF = MIRBuilder.getMF();
723   MachineRegisterInfo &MRI = MF.getRegInfo();
724   const Function &F = MF.getFunction();
725   const DataLayout &DL = F.getParent()->getDataLayout();
726 
727   const unsigned NumArgs = Args.size();
728 
729   // Stores thunks for outgoing register assignments. This is used so we delay
730   // generating register copies until mem loc assignments are done. We do this
731   // so that if the target is using the delayed stack protector feature, we can
732   // find the split point of the block accurately. E.g. if we have:
733   // G_STORE %val, %memloc
734   // $x0 = COPY %foo
735   // $x1 = COPY %bar
736   // CALL func
737   // ... then the split point for the block will correctly be at, and including,
738   // the copy to $x0. If instead the G_STORE instruction immediately precedes
739   // the CALL, then we'd prematurely choose the CALL as the split point, thus
740   // generating a split block with a CALL that uses undefined physregs.
741   SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;
742 
743   for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
744     assert(j < ArgLocs.size() && "Skipped too many arg locs");
745     CCValAssign &VA = ArgLocs[j];
746     assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
747 
748     if (VA.needsCustom()) {
749       std::function<void()> Thunk;
750       unsigned NumArgRegs = Handler.assignCustomValue(
751           Args[i], ArrayRef(ArgLocs).slice(j), &Thunk);
752       if (Thunk)
753         DelayedOutgoingRegAssignments.emplace_back(Thunk);
754       if (!NumArgRegs)
755         return false;
756       j += (NumArgRegs - 1);
757       continue;
758     }
759 
760     auto AllocaAddressSpace = MF.getDataLayout().getAllocaAddrSpace();
761 
762     const MVT ValVT = VA.getValVT();
763     const MVT LocVT = VA.getLocVT();
764 
765     const LLT LocTy(LocVT);
766     const LLT ValTy(ValVT);
767     const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
768     const EVT OrigVT = EVT::getEVT(Args[i].Ty);
769     const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
770     const LLT PointerTy = LLT::pointer(
771         AllocaAddressSpace, DL.getPointerSizeInBits(AllocaAddressSpace));
772 
773     // Expected to be multiple regs for a single incoming arg.
774     // There should be Regs.size() ArgLocs per argument.
775     // This should be the same as getNumRegistersForCallingConv.
776     const unsigned NumParts = Args[i].Flags.size();
777 
778     // Now split the registers into the assigned types.
779     Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
780 
781     if (NumParts != 1 || NewLLT != OrigTy) {
782       // If we can't directly assign the register, we need one or more
783       // intermediate values.
784       Args[i].Regs.resize(NumParts);
785 
786       // When we have indirect parameter passing we are receiving a pointer
787       // that points to the actual value, so we need one "temporary" pointer.
788       if (VA.getLocInfo() == CCValAssign::Indirect) {
789         if (Handler.isIncomingArgumentHandler())
790           Args[i].Regs[0] = MRI.createGenericVirtualRegister(PointerTy);
791       } else {
792         // For each split register, create and assign a vreg that will store
793         // the incoming component of the larger value. These will later be
794         // merged to form the final vreg.
795         for (unsigned Part = 0; Part < NumParts; ++Part)
796           Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
797       }
798     }
799 
800     assert((j + (NumParts - 1)) < ArgLocs.size() &&
801            "Too many regs for number of args");
802 
803     // Coerce into outgoing value types before register assignment.
804     if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy &&
805         VA.getLocInfo() != CCValAssign::Indirect) {
806       assert(Args[i].OrigRegs.size() == 1);
807       buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
808                       ValTy, extendOpFromFlags(Args[i].Flags[0]));
809     }
810 
811     bool IndirectParameterPassingHandled = false;
812     bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
813     for (unsigned Part = 0; Part < NumParts; ++Part) {
814       assert((VA.getLocInfo() != CCValAssign::Indirect || Part == 0) &&
815              "Only the first parameter should be processed when "
816              "handling indirect passing!");
817       Register ArgReg = Args[i].Regs[Part];
818       // There should be Regs.size() ArgLocs per argument.
819       unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
820       CCValAssign &VA = ArgLocs[j + Idx];
821       const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
822 
823       // We found an indirect parameter passing, and we have an
824       // OutgoingValueHandler as our handler (so we are at the call site or the
825       // return value). In this case, start the construction of the following
826       // GMIR, that is responsible for the preparation of indirect parameter
827       // passing:
828       //
829       // %1(indirectly passed type) = The value to pass
830       // %3(pointer) = G_FRAME_INDEX %stack.0
831       // G_STORE %1, %3 :: (store (s128), align 8)
832       //
833       // After this GMIR, the remaining part of the loop body will decide how
834       // to get the value to the caller and we break out of the loop.
835       if (VA.getLocInfo() == CCValAssign::Indirect &&
836           !Handler.isIncomingArgumentHandler()) {
837         Align AlignmentForStored = DL.getPrefTypeAlign(Args[i].Ty);
838         MachineFrameInfo &MFI = MF.getFrameInfo();
839         // Get some space on the stack for the value, so later we can pass it
840         // as a reference.
841         int FrameIdx = MFI.CreateStackObject(OrigTy.getScalarSizeInBits(),
842                                              AlignmentForStored, false);
843         Register PointerToStackReg =
844             MIRBuilder.buildFrameIndex(PointerTy, FrameIdx).getReg(0);
845         MachinePointerInfo StackPointerMPO =
846             MachinePointerInfo::getFixedStack(MF, FrameIdx);
847         // Store the value in the previously created stack space.
848         MIRBuilder.buildStore(Args[i].OrigRegs[Part], PointerToStackReg,
849                               StackPointerMPO,
850                               inferAlignFromPtrInfo(MF, StackPointerMPO));
851 
852         ArgReg = PointerToStackReg;
853         IndirectParameterPassingHandled = true;
854       }
855 
856       if (VA.isMemLoc() && !Flags.isByVal()) {
857         // Individual pieces may have been spilled to the stack and others
858         // passed in registers.
859 
860         // TODO: The memory size may be larger than the value we need to
861         // store. We may need to adjust the offset for big endian targets.
862         LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
863 
864         MachinePointerInfo MPO;
865         Register StackAddr =
866             Handler.getStackAddress(VA.getLocInfo() == CCValAssign::Indirect
867                                         ? PointerTy.getSizeInBytes()
868                                         : MemTy.getSizeInBytes(),
869                                     VA.getLocMemOffset(), MPO, Flags);
870 
871         // Finish the handling of indirect passing from the passer's
872         // (OutgoingValueHandler) side. This branch is needed so that the
873         // pointer to the value, rather than the value itself, is stored to
874         // the stack.
875         if (VA.getLocInfo() == CCValAssign::Indirect)
876           Handler.assignValueToAddress(ArgReg, StackAddr, PointerTy, MPO, VA);
877         else
878           Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
879       } else if (VA.isMemLoc() && Flags.isByVal()) {
880         assert(Args[i].Regs.size() == 1 &&
881                "didn't expect split byval pointer");
882 
883         if (Handler.isIncomingArgumentHandler()) {
884           // We just need to copy the frame index value to the pointer.
885           MachinePointerInfo MPO;
886           Register StackAddr = Handler.getStackAddress(
887               Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
888           MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
889         } else {
890           // For outgoing byval arguments, insert the implicit copy byval
891           // implies, such that writes in the callee do not modify the caller's
892           // value.
893           uint64_t MemSize = Flags.getByValSize();
894           int64_t Offset = VA.getLocMemOffset();
895 
896           MachinePointerInfo DstMPO;
897           Register StackAddr =
898               Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
899 
900           MachinePointerInfo SrcMPO(Args[i].OrigValue);
901           if (!Args[i].OrigValue) {
902             // We still need to accurately track the stack address space if we
903             // don't know the underlying value.
904             const LLT PtrTy = MRI.getType(StackAddr);
905             SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
906           }
907 
908           Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
909                                     inferAlignFromPtrInfo(MF, DstMPO));
910 
911           Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
912                                     inferAlignFromPtrInfo(MF, SrcMPO));
913 
914           Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
915                                      DstMPO, DstAlign, SrcMPO, SrcAlign,
916                                      MemSize, VA);
917         }
918       } else if (i == 0 && !ThisReturnRegs.empty() &&
919                  Handler.isIncomingArgumentHandler() &&
920                  isTypeIsValidForThisReturn(ValVT)) {
921         Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA);
922       } else if (Handler.isIncomingArgumentHandler()) {
923         Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
924       } else {
925         DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
926           Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
927         });
928       }
929 
930       // Finish the handling of indirect parameter passing when receiving
931       // the value (we are in the called function or the caller when receiving
932       // the return value).
933       if (VA.getLocInfo() == CCValAssign::Indirect &&
934           Handler.isIncomingArgumentHandler()) {
935         Align Alignment = DL.getABITypeAlign(Args[i].Ty);
936         MachinePointerInfo MPO = MachinePointerInfo::getUnknownStack(MF);
937 
938         // Since we are doing indirect parameter passing, we know that the value
939         // in the temporary register is not the value passed to the function,
940         // but rather a pointer to that value. Let's load that value into the
941         // virtual register where the parameter should go.
942         MIRBuilder.buildLoad(Args[i].OrigRegs[0], Args[i].Regs[0], MPO,
943                              Alignment);
944 
945         IndirectParameterPassingHandled = true;
946       }
947 
948       if (IndirectParameterPassingHandled)
949         break;
950     }
951 
952     // Now that all pieces have been assigned, re-pack the register-typed values
953     // into the original value-typed registers. This is only necessary when the
954     // value was passed in multiple registers, not indirectly.
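    // E.g. (illustrative) an s128 value received in two s64 locations is
    // merged back into the original s128 vreg by buildCopyFromRegs below.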
955     if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT &&
956         !IndirectParameterPassingHandled) {
957       // Merge the split registers into the expected larger result vregs of
958       // the original call.
959       buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
960                         LocTy, Args[i].Flags[0]);
961     }
962 
963     j += NumParts - 1;
964   }
965   for (auto &Fn : DelayedOutgoingRegAssignments)
966     Fn();
967 
968   return true;
969 }
970 
971 void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
972                                    ArrayRef<Register> VRegs, Register DemoteReg,
973                                    int FI) const {
974   MachineFunction &MF = MIRBuilder.getMF();
975   MachineRegisterInfo &MRI = MF.getRegInfo();
976   const DataLayout &DL = MF.getDataLayout();
977 
978   SmallVector<EVT, 4> SplitVTs;
979   SmallVector<uint64_t, 4> Offsets;
980   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
981 
982   assert(VRegs.size() == SplitVTs.size());
983 
984   unsigned NumValues = SplitVTs.size();
985   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
986   Type *RetPtrTy =
987       PointerType::get(RetTy->getContext(), DL.getAllocaAddrSpace());
988   LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetPtrTy), DL);
989 
990   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
991 
992   for (unsigned I = 0; I < NumValues; ++I) {
993     Register Addr;
994     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
995     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
996                                         MRI.getType(VRegs[I]),
997                                         commonAlignment(BaseAlign, Offsets[I]));
998     MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
999   }
1000 }
1001 
1002 void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
1003                                     ArrayRef<Register> VRegs,
1004                                     Register DemoteReg) const {
1005   MachineFunction &MF = MIRBuilder.getMF();
1006   MachineRegisterInfo &MRI = MF.getRegInfo();
1007   const DataLayout &DL = MF.getDataLayout();
1008 
1009   SmallVector<EVT, 4> SplitVTs;
1010   SmallVector<uint64_t, 4> Offsets;
1011   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
1012 
1013   assert(VRegs.size() == SplitVTs.size());
1014 
1015   unsigned NumValues = SplitVTs.size();
1016   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
1017   unsigned AS = DL.getAllocaAddrSpace();
1018   LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetTy->getPointerTo(AS)), DL);
1019 
1020   MachinePointerInfo PtrInfo(AS);
1021 
1022   for (unsigned I = 0; I < NumValues; ++I) {
1023     Register Addr;
1024     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
1025     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
1026                                         MRI.getType(VRegs[I]),
1027                                         commonAlignment(BaseAlign, Offsets[I]));
1028     MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
1029   }
1030 }
1031 
1032 void CallLowering::insertSRetIncomingArgument(
1033     const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
1034     MachineRegisterInfo &MRI, const DataLayout &DL) const {
1035   unsigned AS = DL.getAllocaAddrSpace();
1036   DemoteReg = MRI.createGenericVirtualRegister(
1037       LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
1038 
1039   Type *PtrTy = PointerType::get(F.getReturnType(), AS);
1040 
1041   SmallVector<EVT, 1> ValueVTs;
1042   ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
1043 
1044   // NOTE: Assume that a pointer won't get split into more than one VT.
1045   assert(ValueVTs.size() == 1);
1046 
1047   ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
1048                     ArgInfo::NoArgIndex);
1049   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
1050   DemoteArg.Flags[0].setSRet();
1051   SplitArgs.insert(SplitArgs.begin(), DemoteArg);
1052 }
1053 
1054 void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
1055                                               const CallBase &CB,
1056                                               CallLoweringInfo &Info) const {
1057   const DataLayout &DL = MIRBuilder.getDataLayout();
1058   Type *RetTy = CB.getType();
1059   unsigned AS = DL.getAllocaAddrSpace();
1060   LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
1061 
1062   int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
1063       DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
1064 
1065   Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
1066   ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
1067                     ArgInfo::NoArgIndex);
1068   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
1069   DemoteArg.Flags[0].setSRet();
1070 
1071   Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
1072   Info.DemoteStackIndex = FI;
1073   Info.DemoteRegister = DemoteReg;
1074 }
1075 
1076 bool CallLowering::checkReturn(CCState &CCInfo,
1077                                SmallVectorImpl<BaseArgInfo> &Outs,
1078                                CCAssignFn *Fn) const {
1079   for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
1080     MVT VT = MVT::getVT(Outs[I].Ty);
1081     if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
1082       return false;
1083   }
1084   return true;
1085 }
1086 
1087 void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
1088                                  AttributeList Attrs,
1089                                  SmallVectorImpl<BaseArgInfo> &Outs,
1090                                  const DataLayout &DL) const {
1091   LLVMContext &Context = RetTy->getContext();
1092   ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1093 
1094   SmallVector<EVT, 4> SplitVTs;
1095   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
1096   addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
1097 
1098   for (EVT VT : SplitVTs) {
1099     unsigned NumParts =
1100         TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
1101     MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
1102     Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
1103 
1104     for (unsigned I = 0; I < NumParts; ++I) {
1105       Outs.emplace_back(PartTy, Flags);
1106     }
1107   }
1108 }
1109 
1110 bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
1111   const auto &F = MF.getFunction();
1112   Type *ReturnType = F.getReturnType();
1113   CallingConv::ID CallConv = F.getCallingConv();
1114 
1115   SmallVector<BaseArgInfo, 4> SplitArgs;
1116   getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
1117                 MF.getDataLayout());
1118   return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
1119 }
1120 
1121 bool CallLowering::parametersInCSRMatch(
1122     const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
1123     const SmallVectorImpl<CCValAssign> &OutLocs,
1124     const SmallVectorImpl<ArgInfo> &OutArgs) const {
1125   for (unsigned i = 0; i < OutLocs.size(); ++i) {
1126     const auto &ArgLoc = OutLocs[i];
1127     // If it's not a register, it's fine.
1128     if (!ArgLoc.isRegLoc())
1129       continue;
1130 
1131     MCRegister PhysReg = ArgLoc.getLocReg();
1132 
1133     // Only look at callee-saved registers.
1134     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
1135       continue;
1136 
1137     LLVM_DEBUG(
1138         dbgs()
1139         << "... Call has an argument passed in a callee-saved register.\n");
1140 
1141     // Check if it was copied from.
1142     const ArgInfo &OutInfo = OutArgs[i];
1143 
1144     if (OutInfo.Regs.size() > 1) {
1145       LLVM_DEBUG(
1146           dbgs() << "... Cannot handle arguments in multiple registers.\n");
1147       return false;
1148     }
1149 
1150     // Check if we copy the register, walking through copies from virtual
1151     // registers. Note that getDefIgnoringCopies does not ignore copies from
1152     // physical registers.
1153     MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
1154     if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
1155       LLVM_DEBUG(
1156           dbgs()
1157           << "... Parameter was not copied into a VReg, cannot tail call.\n");
1158       return false;
1159     }
1160 
1161     // Got a copy. Verify that it's the same as the register we want.
1162     Register CopyRHS = RegDef->getOperand(1).getReg();
1163     if (CopyRHS != PhysReg) {
1164       LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
1165                            "VReg, cannot tail call.\n");
1166       return false;
1167     }
1168   }
1169 
1170   return true;
1171 }
1172 
1173 bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
1174                                      MachineFunction &MF,
1175                                      SmallVectorImpl<ArgInfo> &InArgs,
1176                                      ValueAssigner &CalleeAssigner,
1177                                      ValueAssigner &CallerAssigner) const {
1178   const Function &F = MF.getFunction();
1179   CallingConv::ID CalleeCC = Info.CallConv;
1180   CallingConv::ID CallerCC = F.getCallingConv();
1181 
1182   if (CallerCC == CalleeCC)
1183     return true;
1184 
1185   SmallVector<CCValAssign, 16> ArgLocs1;
1186   CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
1187   if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
1188     return false;
1189 
1190   SmallVector<CCValAssign, 16> ArgLocs2;
1191   CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
1192   if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
1193     return false;
1194 
1195   // We need the argument locations to match up exactly. If there's more in
1196   // one than the other, then we are done.
1197   if (ArgLocs1.size() != ArgLocs2.size())
1198     return false;
1199 
1200   // Make sure that each location is passed in exactly the same way.
1201   for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
1202     const CCValAssign &Loc1 = ArgLocs1[i];
1203     const CCValAssign &Loc2 = ArgLocs2[i];
1204 
1205     // We need both of them to be the same. So if one is a register and one
1206     // isn't, we're done.
1207     if (Loc1.isRegLoc() != Loc2.isRegLoc())
1208       return false;
1209 
1210     if (Loc1.isRegLoc()) {
1211       // If they don't have the same register location, we're done.
1212       if (Loc1.getLocReg() != Loc2.getLocReg())
1213         return false;
1214 
1215       // They matched, so we can move to the next ArgLoc.
1216       continue;
1217     }
1218 
1219     // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1220     if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1221       return false;
1222   }
1223 
1224   return true;
1225 }
1226 
1227 LLT CallLowering::ValueHandler::getStackValueStoreType(
1228     const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
1229   const MVT ValVT = VA.getValVT();
1230   if (ValVT != MVT::iPTR) {
1231     LLT ValTy(ValVT);
1232 
1233     // We lost the pointeriness going through CCValAssign, so try to restore it
1234     // based on the flags.
1235     if (Flags.isPointer()) {
1236       LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1237                                ValTy.getScalarSizeInBits());
1238       if (ValVT.isVector())
1239         return LLT::vector(ValTy.getElementCount(), PtrTy);
1240       return PtrTy;
1241     }
1242 
1243     return ValTy;
1244   }
1245 
1246   unsigned AddrSpace = Flags.getPointerAddrSpace();
1247   return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1248 }
1249 
1250 void CallLowering::ValueHandler::copyArgumentMemory(
1251     const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1252     const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1253     const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1254     CCValAssign &VA) const {
1255   MachineFunction &MF = MIRBuilder.getMF();
1256   MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1257       SrcPtrInfo,
1258       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1259       SrcAlign);
1260 
1261   MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1262       DstPtrInfo,
1263       MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1264       MemSize, DstAlign);
1265 
1266   const LLT PtrTy = MRI.getType(DstPtr);
1267   const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1268 
1269   auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1270   MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1271 }
1272 
1273 Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1274                                                     const CCValAssign &VA,
1275                                                     unsigned MaxSizeBits) {
1276   LLT LocTy{VA.getLocVT()};
1277   LLT ValTy{VA.getValVT()};
1278 
1279   if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1280     return ValReg;
1281 
1282   if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1283     if (MaxSizeBits <= ValTy.getSizeInBits())
1284       return ValReg;
1285     LocTy = LLT::scalar(MaxSizeBits);
1286   }
1287 
1288   const LLT ValRegTy = MRI.getType(ValReg);
1289   if (ValRegTy.isPointer()) {
1290     // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1291     // we have to cast to do the extension.
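    // Sketch (illustrative): %p:_(p0) -> G_PTRTOINT to s32, after which the
    // switch below extends it (e.g. G_ZEXT) to the 64-bit location type.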
1292     LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1293     ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1294   }
1295 
1296   switch (VA.getLocInfo()) {
1297   default: break;
1298   case CCValAssign::Full:
1299   case CCValAssign::BCvt:
1300     // FIXME: bitconverting between vector types may or may not be a
1301     // nop in big-endian situations.
1302     return ValReg;
1303   case CCValAssign::AExt: {
1304     auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1305     return MIB.getReg(0);
1306   }
1307   case CCValAssign::SExt: {
1308     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1309     MIRBuilder.buildSExt(NewReg, ValReg);
1310     return NewReg;
1311   }
1312   case CCValAssign::ZExt: {
1313     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1314     MIRBuilder.buildZExt(NewReg, ValReg);
1315     return NewReg;
1316   }
1317   }
1318   llvm_unreachable("unable to extend register");
1319 }
1320 
1321 void CallLowering::ValueAssigner::anchor() {}
1322 
1323 Register CallLowering::IncomingValueHandler::buildExtensionHint(
1324     const CCValAssign &VA, Register SrcReg, LLT NarrowTy) {
1325   switch (VA.getLocInfo()) {
1326   case CCValAssign::LocInfo::ZExt: {
1327     return MIRBuilder
1328         .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1329                          NarrowTy.getScalarSizeInBits())
1330         .getReg(0);
1331   }
1332   case CCValAssign::LocInfo::SExt: {
1333     return MIRBuilder
1334         .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1335                          NarrowTy.getScalarSizeInBits())
1336         .getReg(0);
1337     break;
1338   }
1339   default:
1340     return SrcReg;
1341   }
1342 }
1343 
1344 /// Check if we can use a basic COPY instruction between the two types.
1345 ///
1346 /// We're currently building on top of the infrastructure using MVT, which loses
1347 /// pointer information in the CCValAssign. We accept copies from physical
1348 /// registers that have been reported as integers if they are to an equivalently
1349 /// sized pointer LLT.
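///
/// For example, copying an incoming location reported as s64 into a p0 vreg of
/// the same size is accepted, while an s64 -> s32 copy is not.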
1350 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1351   if (SrcTy == DstTy)
1352     return true;
1353 
1354   if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1355     return false;
1356 
1357   SrcTy = SrcTy.getScalarType();
1358   DstTy = DstTy.getScalarType();
1359 
1360   return (SrcTy.isPointer() && DstTy.isScalar()) ||
1361          (DstTy.isPointer() && SrcTy.isScalar());
1362 }
1363 
1364 void CallLowering::IncomingValueHandler::assignValueToReg(
1365     Register ValVReg, Register PhysReg, const CCValAssign &VA) {
1366   const MVT LocVT = VA.getLocVT();
1367   const LLT LocTy(LocVT);
1368   const LLT RegTy = MRI.getType(ValVReg);
1369 
1370   if (isCopyCompatibleType(RegTy, LocTy)) {
1371     MIRBuilder.buildCopy(ValVReg, PhysReg);
1372     return;
1373   }
1374 
1375   auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1376   auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1377   MIRBuilder.buildTrunc(ValVReg, Hint);
1378 }
1379