//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
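  // An illustrative IR pattern (not from this file) that hits this path:
  //   %r = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend
  //                          to i8* (i8*, i8*)*)(i8* %obj, i8* %sel)
  // stripPointerCasts() looks through the bitcast so we can emit a direct
  // call to @objc_msgSend rather than an indirect call through a register.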
  const Value *CalleeV = CB.getCalledValue()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed in from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right.
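    // Illustrative example: for a callee declared as
    //   declare void @f(%struct.S* byval align 16 %p)
    // getParamAlign() below returns the frontend's 'align 16'; without the
    // attribute we fall back to the target's getByValTypeAlignment() guess.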
    Align FrameAlign;
    // OpIdx is an attribute index (argument number + FirstArgIndex), while
    // getParamAlign() takes a 0-based argument number, so convert.
    if (auto ParamAlign =
            FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(Align(DL.getABITypeAlignment(Arg.Ty)));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
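      // For example, an s128 argument on AArch64 is passed in two x-registers:
      // NewVT is i64 and NumParts is 2 below, an exact split.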
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }

      // For incoming arguments (physregs to vregs), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. passing an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(Align(1));
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling
        // convention. We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
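        // As on the incoming path, the first part is tagged 'split' and the
        // last 'split end', so the calling-convention code can recognise the
        // parts as one original argument.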
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    MVT OrigVT = MVT::getVT(Args[i].Ty);
    MVT VAVT = VA.getValVT();
    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, NewReg);
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // Don't currently support loading/storing a type that needs to be split
      // to the stack. Should be easy, just not implemented yet.
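      // E.g. an s128 that was split into two part-registers above but then
      // assigned a stack slot by the calling convention would bail out here.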
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet\n");
        return false;
      }
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(Args[i], StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
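    // E.g. if one convention assigns the value to stack offset 0 and the
    // other to offset 8, the locations differ and we conservatively reject.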
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}