//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

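// Map the IR-level parameter attributes at ArgIdx on this call site to the
// target-independent ISD::ArgFlagsTy bits that the calling-convention
// analysis understands.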
ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  if (Call.paramHasAttr(ArgIdx, Attribute::SExt))
    Flags.setSExt();
  if (Call.paramHasAttr(ArgIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Call.paramHasAttr(ArgIdx, Attribute::InReg))
    Flags.setInReg();
  if (Call.paramHasAttr(ArgIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Call.paramHasAttr(ArgIdx, Attribute::Nest))
    Flags.setNest();
  if (Call.paramHasAttr(ArgIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Call.paramHasAttr(ArgIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Call.paramHasAttr(ArgIdx, Attribute::InAlloca))
    Flags.setInAlloca();
  if (Call.paramHasAttr(ArgIdx, Attribute::Returned))
    Flags.setReturned();
  if (Call.paramHasAttr(ArgIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Call.paramHasAttr(ArgIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  return Flags;
}

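// Gather everything needed to lower a call from the IR CallBase: argument
// registers and flags, the callee, the return value, and tail-call
// eligibility. The target-specific lowerCall overload does the actual work.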
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

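// Copy the relevant attributes from the IR function or call site (FuncInfo)
// into the low-level flags of Arg. For arguments, OpIdx is an AttributeList
// index, i.e. offset by AttributeList::FirstArgIndex from the argument number.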
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed from the frontend. The
    // backend will guess if this info is not there, but there are cases it
    // cannot get right.
    Align FrameAlign;
    if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

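// Explicit instantiations: setArgFlags is used both with a Function (formal
// arguments) and with a CallBase (call-site arguments).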
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

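// Pack the pieces of a value held in SrcRegs into a single vreg of type
// PackedTy by chaining G_INSERTs, one per piece, at the offsets computed by
// computeValueLLTs.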
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

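// The inverse of packRegs: extract each piece of SrcReg into the
// corresponding register in DstRegs, at the offsets computed by
// computeValueLLTs.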
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

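// Convenience overload: build a fresh CCState for the current function's
// calling convention, then run the main assignment loop below.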
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

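// The core of argument lowering. First pass: assign a location (register or
// stack slot) to every argument, splitting values that the calling convention
// passes in multiple registers. Second pass: let the ValueHandler emit the
// copies, loads, and stores that realize each assignment.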
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (CurVT.isSimple() &&
        !Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                           CCValAssign::Full, Args[i], Args[i].Flags[0],
                           CCInfo))
      continue;

    MVT NewVT = TLI->getRegisterTypeForCallingConv(
        F.getContext(), F.getCallingConv(), EVT(CurVT));

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts = TLI->getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), CurVT);
    if (NumParts > 1) {
      // For now only handle exact splits.
      if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
        return false;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.
    if (Handler.isIncomingArgumentHandler()) {
      if (NumParts == 1) {
        // Try to use the register type if we couldn't assign the VT.
        if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                              Args[i].Flags[0], CCInfo))
          return false;
      } else {
        // We're handling an incoming arg which is split over multiple regs.
        // E.g. passing an s128 on AArch64.
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        Args[i].OrigRegs.push_back(Args[i].Regs[0]);
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        LLT NewLLT = getLLTForMVT(NewVT);
        // For each split register, create and assign a vreg that will store
        // the incoming component of the larger value. These will later be
        // merged to form the final vreg.
        for (unsigned Part = 0; Part < NumParts; ++Part) {
          Register Reg =
              MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (Part == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (Part == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Reg);
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[Part], CCInfo)) {
            // Still couldn't assign this smaller part type for some reason.
            return false;
          }
        }
      }
    } else {
      // Handling an outgoing arg that might need to be split.
      if (NumParts < 2)
        return false; // Don't know how to deal with this type combination.

      // This type is passed via multiple registers in the calling convention.
      // We need to extract the individual parts.
      Register LargeReg = Args[i].Regs[0];
      LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
      auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
      assert(Unmerge->getNumOperands() == NumParts + 1);
      ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
      // We're going to replace the regs and flags with the split ones.
      Args[i].Regs.clear();
      Args[i].Flags.clear();
      for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
        ISD::ArgFlagsTy Flags = OrigFlags;
        if (PartIdx == 0) {
          Flags.setSplit();
        } else {
          Flags.setOrigAlign(Align(1));
          if (PartIdx == NumParts - 1)
            Flags.setSplitEnd();
        }
        Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
        Args[i].Flags.push_back(Flags);
        if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                              Args[i], Args[i].Flags[PartIdx], CCInfo))
          return false;
      }
    }
  }

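  // Second pass: walk the computed locations and emit the actual copies and
  // memory operations. j tracks the current ArgLoc index, which can run ahead
  // of i when an argument occupies several locations.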
  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    EVT OrigVT = EVT::getEVT(Args[i].Ty);
    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
    // A single incoming arg is expected to occupy multiple regs; there should
    // be Regs.size() ArgLocs per argument.
    unsigned NumArgRegs = Args[i].Regs.size();

    assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");
    for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
      VA = ArgLocs[j + Part];
      if (VA.isMemLoc()) {
        // Don't currently support loading/storing a type that needs to be split
        // to the stack. Should be easy, just not implemented yet.
        if (NumArgRegs > 1) {
          LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet\n");
          return false;
        }

        // FIXME: Use correct address space for pointer size
        EVT LocVT = VA.getValVT();
        unsigned MemSize = LocVT == MVT::iPTR ? DL.getPointerSize()
                                              : LocVT.getStoreSize();
        unsigned Offset = VA.getLocMemOffset();
        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
        Handler.assignValueToAddress(Args[i], StackAddr, MemSize, MPO, VA);
        continue;
      }

      assert(VA.isRegLoc() && "custom loc should have been handled already");

      if (OrigVT.getSizeInBits() >= VAVT.getSizeInBits() ||
          !Handler.isIncomingArgumentHandler()) {
        // This is an argument that might have been split. There should be
        // Regs.size() ArgLocs per argument.

        // Insert the argument copies. If VAVT < OrigVT, we'll insert the merge
        // to the original register after handling all of the parts.
        Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        continue;
      }

      // This ArgLoc covers multiple pieces, so we need to split it.
      const LLT VATy(VAVT.getSimpleVT());
      Register NewReg =
        MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
      Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
      // If it's a vector type, we either need to truncate the elements
      // or do an unmerge to get the lower block of elements.
      if (VATy.isVector() &&
          VATy.getNumElements() > OrigVT.getVectorNumElements()) {
        // Just handle the case where the VA type is 2 * original type.
        if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
          LLVM_DEBUG(dbgs()
                     << "Incoming promoted vector arg has too many elts\n");
          return false;
        }
        auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
        MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
      } else {
        MIRBuilder.buildTrunc(ArgReg, {NewReg});
      }
    }

    // Now that all pieces have been handled, re-pack any arguments into any
    // wider, original registers.
    if (Handler.isIncomingArgumentHandler()) {
      if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
        assert(NumArgRegs >= 2);

        // Merge the split registers into the expected larger result vreg
        // of the original call.
        MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
      }
    }

    j += NumArgRegs - 1;
  }

  return true;
}

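// Dry-run each argument through the appropriate CCAssignFn (fixed or vararg)
// to see whether the calling convention can place it. No instructions are
// emitted.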
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

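// Check whether the call's return values would be assigned to the same
// locations under both the callee's and the caller's calling conventions.
// This must hold for a tail call to be safe when the two conventions differ.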
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

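// Extend ValReg so it fits the location type VA requires, using an any-,
// sign-, or zero-extend as the location info dictates. If MaxSizeBits is
// nonzero, the extension is capped at that width.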
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}