//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

// Out-of-line virtual-method anchor; pins CallLowering's vtable to this
// translation unit.
void CallLowering::anchor() {}

/// Translate an IR call site into the target-independent ArgInfo
/// representation and delegate to the target's lowerCall hook.
///
/// \param MIRBuilder    builder positioned where the call should be emitted.
/// \param CS            the IR call site being lowered.
/// \param ResReg        vreg that receives the call's result (unused for void
///                      calls — see the isVoidTy() check below).
/// \param ArgRegs       one vreg per IR argument, parallel to CS.args().
/// \param SwiftErrorVReg vreg forwarded to the target hook for swifterror.
/// \param GetCalleeReg  lazily materializes a register holding the callee
///                      address; only invoked for indirect calls.
/// \return true on success; false if the call can't be handled (currently:
///         any argument marked swiftself).
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             Register ResReg, ArrayRef<Register> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  // Instruction -> BasicBlock -> Function -> Module, then its DataLayout.
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    // Arguments at or past the function type's parameter count are varargs
    // (i < NumFixedArgs marks the "fixed" ones).
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    // Attribute-list indices for arguments start at FirstArgIndex, hence the
    // offset when querying per-argument attributes.
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    // We don't currently support swiftself args.
    if (OrigArg.Flags.isSwiftSelf())
      return false;
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Direct calls reference the callee symbolically via a global-address
  // operand; indirect calls read the callee address from a register produced
  // on demand by GetCalleeReg.
  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (const Function *F = CS.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResReg, CS.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    // Return-value attributes live at AttributeList::ReturnIndex.
    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);

  // Hand off to the target-specific overload.
  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
                   SwiftErrorVReg);
}

/// Populate \p Arg.Flags from the IR attributes found at attribute-list index
/// \p OpIdx of \p FuncInfo (either a Function or a CallInst — see the explicit
/// instantiations below), plus derived data: byval/inalloca size and
/// alignment, and the type's natural ABI alignment.
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  // Mirror each relevant IR attribute into the ISD arg-flags word.
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    // byval/inalloca arguments are pointers; size the stack copy from the
    // pointee type.
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    // Prefer the type recorded on the byval attribute itself (when present)
    // over the pointer's element type.
    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    unsigned FrameAlign;
    // NOTE(review): OpIdx is an AttributeList index; the "- 2" presumably
    // converts it to whatever index getParamAlignment expects here — confirm
    // against the Function/CallInst getParamAlignment indexing convention.
    if (FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
    else
      // No frontend-provided alignment: fall back to the target's guess.
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  // Record the type's natural ABI alignment regardless of attributes.
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

// Explicit instantiations: setArgFlags is used with both function
// declarations (for formal arguments) and call instructions (for call sites).
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

/// Run calling-convention assignment over \p Args and materialize each
/// argument in its assigned location (register or stack) via \p Handler.
///
/// \return true if every argument was assigned and emitted; false on any
///         argument the CC or this routine can't handle yet (failed
///         assignment, lossy promotion, byval locations, ...).
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  // Pass 1: let the CC assign a location for every argument.
  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
      // Try to use the register type if we couldn't assign the VT.
      // Only incoming-argument handlers retry; outgoing calls give up here.
      if (!Handler.isArgumentHandler() || !CurVT.isValid())
        return false;
      CurVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));
      if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
        return false;
    }
  }

  // Pass 2: emit the copies/loads/stores for each assigned location.
  // j tracks the location index separately because custom values may consume
  // several consecutive ArgLocs entries for a single argument.
  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      // Target-specific handling; returns how many extra locations it used.
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    assert(Args[i].Regs.size() == 1 &&
           "Can't handle multiple virtual regs yet");

    // FIXME: Pack registers if we have more than one.
    unsigned ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isArgumentHandler() && VAVT != OrigVT) {
        // The location type was widened in pass 1; receive the value in the
        // wide type, then narrow it back to the original type.
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
          return false; // Can't handle this type of arg yet.
        const LLT VATy(VAVT);
        unsigned NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts");
            return false;
          }
          // Split the wide vector in half and keep the low part.
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          // Scalar (or same-count vector) case: a plain truncate suffices.
          MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0);
        }
      } else {
        // Types match: copy straight between the vreg and the physreg.
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      // iPTR has no fixed bit width; size it from the DataLayout instead.
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      unsigned StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

/// Extend \p ValReg to the location VT recorded in \p VA, according to the
/// location's extension kind (any/sign/zero-extend), and return the register
/// holding the extended value. Returns \p ValReg unchanged when the sizes
/// already match or no extension is required (Full/BCvt).
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  // Already the right size: nothing to do.
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  // Any other LocInfo (e.g. FPExt) is unsupported here.
  llvm_unreachable("unable to extend register");
}

// Out-of-line anchor for ValueHandler's vtable.
void CallLowering::ValueHandler::anchor() {}