//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (const Function *F = CS.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);

  const MDNode *KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);

  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
                   SwiftErrorVReg, KnownCallees);
}
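
/// Mirror the IR attributes of parameter/return value \p OpIdx of \p FuncInfo
/// (either a Function or a call instruction) into the ArgFlagsTy bits of
/// \p Arg, compute the byval size and alignment where applicable, and record
/// the ABI alignment of the argument's type.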
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from the frontend. The backend
    // will guess if this information is missing, but there are cases it
    // cannot get right.
    unsigned FrameAlign;
    if (unsigned ParamAlign = FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = ParamAlign;
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

/// Build a single value of type \p PackedTy from the registers in \p SrcRegs
/// by chaining G_INSERTs into a G_IMPLICIT_DEF, at the offsets computed by
/// computeValueLLTs.
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

/// Split \p SrcReg of type \p PackedTy into \p DstRegs, with one G_EXTRACT
/// per constituent value.
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}
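
/// Invoke \p Handler on each argument in \p Args: a first pass asks the
/// target's calling-convention assignment function for a location for every
/// argument (retrying with the legalized register type when the original VT
/// is rejected), and a second pass emits the register copies or stack
/// accesses that place each value in its assigned location. The first
/// overload builds a fresh CCState for the current function and delegates to
/// the second.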
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          CCInfo)) {
      // Try to use the register type if we couldn't assign the VT.
      if (!Handler.isIncomingArgumentHandler() || !CurVT.isValid())
        return false;
      CurVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));
      if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                            CCInfo))
        return false;
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    assert(Args[i].Regs.size() == 1 &&
           "Can't handle multiple virtual regs yet");

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
          return false; // Can't handle this type of arg yet.
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}
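
/// Extend \p ValReg so it fits the location type required by \p VA, emitting
/// G_ANYEXT, G_SEXT or G_ZEXT according to the location's extension kind, and
/// returning \p ValReg unchanged when the sizes already match.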
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}