//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
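  // If the callee is still not a Function after stripping the pointer casts,
  // fall back to an indirect call through the register that GetCalleeReg()
  // materializes.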
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from the frontend. The backend
    // will guess if this info is not there, but there are cases it cannot
    // get right.
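    // Prefer the alignment explicitly attached to the parameter; only fall
    // back to the target's estimate when the attribute is absent.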
    Align FrameAlign;
    if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a
      // scenario we currently support.
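      // E.g. on AArch64 an s128 argument is typically split into two i64
      // parts, giving NumParts == 2 with NewVT == i64.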
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }

      // For incoming arguments (physregs to vregs), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. passing an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(Align(1));
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling
        // convention. We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
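        // As with the incoming case, tag the first part 'split' and the last
        // part 'split end' so the parts can be recognized as one original
        // value, e.g. %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val:_(s128).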
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    MVT OrigVT = MVT::getVT(Args[i].Ty);
    MVT VAVT = VA.getValVT();
    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs for this argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs for this argument.
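          // Each part received its own ArgLoc during the assignment loop
          // above, so the locations can be consumed in lockstep here.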
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // Don't currently support loading/storing a type that needs to be split
      // to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet\n");
        return false;
      }
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(Args[i], StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If one convention
  // produces more locations than the other, they can't be compatible.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
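    // If the two conventions place the value at different stack offsets, the
    // locations are not interchangeable.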
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}