//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  if (const Function *F = CS.getCalledFunction())
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);

  Info.KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CS.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CS.isMustTailCall();

  return lowerCall(MIRBuilder, Info);
}
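// A note on OpIdx below: it is an AttributeList index, not a raw parameter
// number, i.e. the argument number offset by AttributeList::FirstArgIndex.
// For illustration only (hypothetical caller, mirroring the loop in lowerCall
// above), flagging the first IR argument of a Function F would look like:
//   setArgFlags(OrigArg, AttributeList::FirstArgIndex + 0, DL, F);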
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed in from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
    unsigned FrameAlign;
    if (FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;
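// packRegs merges the per-element registers of an aggregate back into a single
// value. A minimal sketch of the MIR it emits (illustrative only; register
// names are made up and the bit offsets come from computeValueLLTs): packing
// the two s64 pieces of an s128 aggregate produces
//   %0:_(s128) = G_IMPLICIT_DEF
//   %1:_(s128) = G_INSERT %0, %lo(s64), 0
//   %2:_(s128) = G_INSERT %1, %hi(s64), 64
// unpackRegs below is the inverse, emitting one G_EXTRACT per element.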
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          CCInfo)) {
      // Try to use the register type if we couldn't assign the VT.
      if (!Handler.isIncomingArgumentHandler() || !CurVT.isValid())
        return false;
      CurVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));
      if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                            CCInfo))
        return false;
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    assert(Args[i].Regs.size() == 1 &&
           "Can't handle multiple virtual regs yet");

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
          return false; // Can't handle this type of arg yet.
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}
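// extendRegister widens ValReg to the location type recorded in VA, or returns
// it unchanged if it is already wide enough. A minimal sketch of the resulting
// MIR for a zero-extended i8 argument passed in a 32-bit location (register
// names are illustrative only):
//   %ext:_(s32) = G_ZEXT %val(s8)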
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}