//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

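// Pin CallLowering's vtable to this translation unit.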
void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // This commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

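// Note on OpIdx: this is an AttributeList index, i.e. AttributeList::ReturnIndex
// for the return value and AttributeList::FirstArgIndex + N for the Nth IR
// argument; it is not a zero-based argument number.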
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
    Align FrameAlign;
    // OpIdx is an attribute-list index, but getParamAlign takes a zero-based
    // IR argument number, so convert back before querying.
    if (auto ParamAlign =
            FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

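// Explicit instantiations: argument flags are derived either from a Function's
// formal parameters or from a CallBase at a call site.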
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

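// packRegs builds the wide value incrementally with G_INSERT. As a sketch,
// packing the two s64 pieces of an s128 value yields:
//   %p0:_(s128) = G_IMPLICIT_DEF
//   %p1:_(s128) = G_INSERT %p0, %lo(s64), 0
//   %p2:_(s128) = G_INSERT %p1, %hi(s64), 64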
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

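// unpackRegs is the inverse: each destination vreg becomes a G_EXTRACT of
// SrcReg at the offset computeValueLLTs reports for it.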
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

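// The main assignment routine runs in two phases. Phase 1 asks the target's
// assign function to place each argument; values the convention cannot take
// as-is are split into NumParts register-sized pieces (G_UNMERGE_VALUES for
// outgoing arguments, fresh vregs that are later G_MERGE_VALUES'd back
// together for incoming ones). Phase 2 walks the resulting CCValAssigns and
// lets the ValueHandler copy each piece to or from its register or stack
// location.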
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }

      // For incoming arguments (physregs to vregs), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. passing an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(Align(1));
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling convention.
        // We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

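  // Phase 2: materialize the copies for each assigned location.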
  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    MVT OrigVT = MVT::getVT(Args[i].Ty);
    MVT VAVT = VA.getValVT();
    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // We don't currently support loading/storing a type that needs to be
      // split to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented\n");
        return false;
      }
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(Args[i], StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness.
      return false;
    }
  }
  return true;
}

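// Dry-run the calling convention over Args without emitting any code. This is
// used to check whether a set of arguments can be analyzed at all, e.g. when
// deciding tail-call eligibility below.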
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

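// Check that values returned under the callee's convention would land in
// exactly the locations the caller expects. This is a prerequisite for a safe
// tail call when the two calling conventions differ.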
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

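// Extend ValReg to the location type recorded in VA, using whichever extension
// kind the calling convention picked. As a sketch, an s8 value sign-extended
// into a 32-bit location becomes:
//   %ext:_(s32) = G_SEXT %val:_(s8)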
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt:
    return MIRBuilder.buildAnyExt(LocTy, ValReg).getReg(0);
  case CCValAssign::SExt:
    return MIRBuilder.buildSExt(LocTy, ValReg).getReg(0);
  case CCValAssign::ZExt:
    return MIRBuilder.buildZExt(LocTy, ValReg).getReg(0);
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}