//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

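// Translate the IR-level attributes on a call-site argument into the
// ISD::ArgFlagsTy form consumed by calling-convention analysis.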
ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  if (Call.paramHasAttr(ArgIdx, Attribute::SExt))
    Flags.setSExt();
  if (Call.paramHasAttr(ArgIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Call.paramHasAttr(ArgIdx, Attribute::InReg))
    Flags.setInReg();
  if (Call.paramHasAttr(ArgIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Call.paramHasAttr(ArgIdx, Attribute::Nest))
    Flags.setNest();
  if (Call.paramHasAttr(ArgIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Call.paramHasAttr(ArgIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Call.paramHasAttr(ArgIdx, Attribute::InAlloca))
    Flags.setInAlloca();
  if (Call.paramHasAttr(ArgIdx, Attribute::Returned))
    Flags.setReturned();
  if (Call.paramHasAttr(ArgIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Call.paramHasAttr(ArgIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  return Flags;
}

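// Lower an IR call by gathering per-argument and return-value information
// into a CallLoweringInfo, resolving the callee, and then handing the result
// off to the target's lowerCall(MIRBuilder, Info) implementation.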
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
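  // A call can only be lowered as a tail call if the IR marked it as one, it
  // is in a valid tail-call position, and tail calls have not been disabled
  // on the caller via the "disable-tail-calls" attribute.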
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);

    // If we have an explicit sret argument that is an Instruction (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

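// Shared attribute-to-flags translation used both for formal arguments
// (FuncInfoTy == Function) and for call-site arguments (FuncInfoTy ==
// CallBase); see the explicit instantiations below.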
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be provided by the frontend; the
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
    Align FrameAlign;
    // OpIdx is biased by AttributeList::FirstArgIndex, so subtract it to get
    // the zero-based parameter number.
    if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 1))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

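// Pack several virtual registers, one per leaf type of PackedTy, into a
// single value by chaining G_INSERTs into an undef value at the offsets
// computed for PackedTy.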
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

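// Inverse of packRegs: split SrcReg into one G_EXTRACT per leaf type of
// PackedTy, at the same offsets used when packing.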
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

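// Convenience overload that builds a CCState for the current function before
// delegating to the main assignment loop below.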
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

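// Walk the arguments twice: first let the handler assign a location to each
// value (splitting values that need more than one register), then emit the
// copies, extensions, and stack accesses that realize those locations.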
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (CurVT.isSimple() &&
        !Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                           CCValAssign::Full, Args[i], Args[i].Flags[0],
                           CCInfo))
      continue;

    MVT NewVT = TLI->getRegisterTypeForCallingConv(
        F.getContext(), F.getCallingConv(), EVT(CurVT));

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts = TLI->getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), CurVT);
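    // E.g. on AArch64 an i128 is passed in two i64 registers, so NumParts
    // would be 2 here.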
    if (NumParts > 1) {
      // For now only handle exact splits.
      if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
        return false;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.
    if (Handler.isIncomingArgumentHandler()) {
      if (NumParts == 1) {
        // Try to use the register type if we couldn't assign the VT.
        if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                              Args[i].Flags[0], CCInfo))
          return false;
      } else {
        // We're handling an incoming arg which is split over multiple regs.
        // E.g. passing an s128 on AArch64.
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        Args[i].OrigRegs.push_back(Args[i].Regs[0]);
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        LLT NewLLT = getLLTForMVT(NewVT);
        // For each split register, create and assign a vreg that will store
        // the incoming component of the larger value. These will later be
        // merged to form the final vreg.
        for (unsigned Part = 0; Part < NumParts; ++Part) {
          Register Reg =
              MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (Part == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (Part == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Reg);
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[Part], CCInfo)) {
            // Still couldn't assign this smaller part type for some reason.
            return false;
          }
        }
      }
    } else {
      // Handling an outgoing arg that might need to be split.
      if (NumParts < 2)
        return false; // Don't know how to deal with this type combination.

      // This type is passed via multiple registers in the calling convention.
      // We need to extract the individual parts.
      Register LargeReg = Args[i].Regs[0];
      LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
      auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
      assert(Unmerge->getNumOperands() == NumParts + 1);
      ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
      // We're going to replace the regs and flags with the split ones.
      Args[i].Regs.clear();
      Args[i].Flags.clear();
      for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
        ISD::ArgFlagsTy Flags = OrigFlags;
        if (PartIdx == 0) {
          Flags.setSplit();
        } else {
          Flags.setOrigAlign(Align(1));
          if (PartIdx == NumParts - 1)
            Flags.setSplitEnd();
        }
        Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
        Args[i].Flags.push_back(Flags);
        if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                              Args[i], Args[i].Flags[PartIdx], CCInfo))
          return false;
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    EVT OrigVT = EVT::getEVT(Args[i].Ty);
    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    unsigned NumArgRegs = Args[i].Regs.size();

    assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");
    for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
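      // Note that VA is a reference into ArgLocs, so this assignment
      // overwrites ArgLocs[j] in place when Part > 0. That slot is never
      // read again: j is advanced past all of these locations below.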
      VA = ArgLocs[j + Part];
      if (VA.isMemLoc()) {
        // Don't currently support loading/storing a type that needs to be split
        // to the stack. Should be easy, just not implemented yet.
        if (NumArgRegs > 1) {
          LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet\n");
          return false;
        }

        // FIXME: Use correct address space for pointer size
        EVT LocVT = VA.getValVT();
        unsigned MemSize = LocVT == MVT::iPTR ? DL.getPointerSize()
                                              : LocVT.getStoreSize();
        unsigned Offset = VA.getLocMemOffset();
        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
        Handler.assignValueToAddress(Args[i], StackAddr,
                                     MemSize, MPO, VA);
        continue;
      }

      assert(VA.isRegLoc() && "custom loc should have been handled already");

      if (OrigVT.getSizeInBits() >= VAVT.getSizeInBits() ||
          !Handler.isIncomingArgumentHandler()) {
        // This is an argument that might have been split. There should be
        // Regs.size() ArgLocs per argument.

        // Insert the argument copies. If VAVT < OrigVT, we'll insert the merge
        // to the original register after handling all of the parts.
        Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        continue;
      }

      // This ArgLoc covers multiple pieces, so we need to split it.
      const LLT VATy(VAVT.getSimpleVT());
      Register NewReg =
        MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
      Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
      // If it's a vector type, we either need to truncate the elements
      // or do an unmerge to get the lower block of elements.
      if (VATy.isVector() &&
          VATy.getNumElements() > OrigVT.getVectorNumElements()) {
        // Just handle the case where the VA type is 2 * original type.
        if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
          LLVM_DEBUG(dbgs()
                     << "Incoming promoted vector arg has too many elts\n");
          return false;
        }
        auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
        MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
      } else {
        MIRBuilder.buildTrunc(ArgReg, {NewReg});
      }
    }

    // Now that all pieces have been handled, re-pack any split arguments
    // into their wider, original registers.
    if (Handler.isIncomingArgumentHandler()) {
      if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
        assert(NumArgRegs >= 2);

        // Merge the split registers into the expected larger result vreg
        // of the original call.
        MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
      }
    }

    j += NumArgRegs - 1;
  }

  return true;
}

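// Run every argument through the appropriate fixed or vararg CCAssignFn,
// failing if any value cannot be given a location under that convention.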
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

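// Check whether the callee's and caller's calling conventions would assign
// this call's values to identical locations; a mismatch rules out lowering
// the call as a tail call.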
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then they are not compatible.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

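// Extend ValReg to the location type recorded in VA, honoring the assign
// function's choice of any-, sign-, or zero-extension. If MaxSizeBits is
// nonzero, it caps how wide the extension may be.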
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}