//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallLowering.h"
#include "RISCVCallingConv.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

namespace {

struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn
  // stored by OutgoingValueAssigner since RISC-V implements its CC using a
  // custom function with a different signature.
  RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args for a return.
  bool IsRet;

public:
  RISCVOutgoingValueAssigner(RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::OutgoingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State, Info.IsFixed,
                      IsRet, Info.Ty))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                            MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
    LLT sXLen = LLT::scalar(Subtarget.getXLen());

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();

    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
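    // The outgoing argument area starts at SP, which the RISC-V psABI
    // requires to be 16-byte aligned at calls for the standard (non-RVE)
    // ABIs, so the slot alignment is the common alignment of 16 and the
    // slot's offset.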
    auto MMO =
        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                commonAlignment(Align(16), LocMemOffset));

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    const CCValAssign &VA = VAs[0];
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
      Register PhysReg = VA.getLocReg();

      auto assignFunc = [=]() {
        auto Trunc = MIRBuilder.buildAnyExt(LLT(VA.getLocVT()), Arg.Regs[0]);
        MIRBuilder.buildCopy(PhysReg, Trunc);
        MIB.addUse(PhysReg, RegState::Implicit);
      };

      if (Thunk) {
        *Thunk = assignFunc;
        return 1;
      }

      assignFunc();
      return 1;
    }

    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    auto assignFunc = [=]() {
      assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
      if (VAHi.isRegLoc())
        assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
    };

    if (Thunk) {
      *Thunk = assignFunc;
      return 2;
    }

    assignFunc();
    return 2;
  }

private:
  MachineInstrBuilder MIB;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const RISCVSubtarget &Subtarget;
};

struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn
  // stored by IncomingValueAssigner since RISC-V implements its CC using a
  // custom function with a different signature.
  RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args from a return.
  bool IsRet;

public:
  RISCVIncomingValueAssigner(RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
      : CallLowering::IncomingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();

    if (LocVT.isScalableVector())
      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();

    if (RISCVAssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State,
                      /*IsFixed=*/true, IsRet, Info.Ty))
      return true;

    StackSize = State.getStackSize();
    return false;
  }
};

struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
  RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
        .getReg(0);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    const CCValAssign &VA = VAs[0];
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
      Register PhysReg = VA.getLocReg();

      markPhysRegUsed(PhysReg);

      LLT LocTy(VA.getLocVT());
      auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);

      MIRBuilder.buildTrunc(Arg.Regs[0], Copy.getReg(0));
      return 1;
    }

    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
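    // The high half may instead have been passed on the stack; that case was
    // handled above, so only a register location remains to be assigned.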
    if (VAHi.isRegLoc())
      assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);

    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

private:
  const RISCVSubtarget &Subtarget;
};

struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
  RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : RISCVIncomingValueHandler(B, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
  RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                         MachineInstrBuilder &MIB)
      : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

} // namespace

RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}

/// Return true if a scalable vector with element type EltTy is legal for
/// lowering.
static bool isLegalElementTypeForRVV(Type *EltTy,
                                     const RISCVSubtarget &Subtarget) {
  if (EltTy->isPointerTy())
    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
    return true;
  if (EltTy->isIntegerTy(64))
    return Subtarget.hasVInstructionsI64();
  if (EltTy->isHalfTy())
    return Subtarget.hasVInstructionsF16();
  if (EltTy->isBFloatTy())
    return Subtarget.hasVInstructionsBF16Minimal();
  if (EltTy->isFloatTy())
    return Subtarget.hasVInstructionsF32();
  if (EltTy->isDoubleTy())
    return Subtarget.hasVInstructionsF64();
  return false;
}

// TODO: Support all argument types.
// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
                                    bool IsLowerArgs = false) {
  if (T->isIntegerTy())
    return true;
  if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy() || T->isFP128Ty())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isArrayTy())
    return isSupportedArgumentType(T->getArrayElementType(), Subtarget,
                                   IsLowerArgs);
  // TODO: Support fixed vector types.
  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;
  return false;
}

// TODO: Only integer, pointer and aggregate types are supported now.
// TODO: Remove IsLowerRetVal argument by adding support for vectors in
// lowerCall.
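// Note that the recursive calls for array and struct elements below do not
// forward IsLowerRetVal, so scalable vectors nested inside aggregates are
// rejected.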
static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
                                  bool IsLowerRetVal = false) {
  if (T->isIntegerTy() || T->isFloatingPointTy() || T->isPointerTy())
    return true;

  if (T->isArrayTy())
    return isSupportedReturnType(T->getArrayElementType(), Subtarget);

  if (T->isStructTy()) {
    auto StructT = cast<StructType>(T);
    for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
      if (!isSupportedReturnType(StructT->getElementType(i), Subtarget))
        return false;
    return true;
  }

  if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
      T->isScalableTy() &&
      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
    return true;

  return false;
}

bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                    const Value *Val, ArrayRef<Register> VRegs,
                                    FunctionLoweringInfo &FLI) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");
  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);

  if (!FLI.CanLowerReturn) {
    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
  } else if (!VRegs.empty()) {
    const RISCVSubtarget &Subtarget =
        MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
    if (!isSupportedReturnType(Val->getType(), Subtarget,
                               /*IsLowerRetVal=*/true))
      return false;

    MachineFunction &MF = MIRBuilder.getMF();
    const DataLayout &DL = MF.getDataLayout();
    const Function &F = MF.getFunction();
    CallingConv::ID CC = F.getCallingConv();

    ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
    setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

    SmallVector<ArgInfo, 4> SplitRetInfos;
    splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);

    RISCVOutgoingValueAssigner Assigner(
        CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
        /*IsRet=*/true);
    RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                       MIRBuilder, CC, F.isVarArg()))
      return false;
  }

  MIRBuilder.insertInstr(Ret);
  return true;
}

bool RISCVCallLowering::canLowerReturn(MachineFunction &MF,
                                       CallingConv::ID CallConv,
                                       SmallVectorImpl<BaseArgInfo> &Outs,
                                       bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (CC_RISCV(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo,
                 /*IsFixed=*/true, /*isRet=*/true, nullptr))
      return false;
  }
  return true;
}

/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
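/// This lets va_arg walk all variadic arguments with stack loads, as if they
/// had all been passed in memory.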
void RISCVCallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(Subtarget.getTargetABI());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Size of the vararg save area. For now, the varargs save area is either
  // zero or large enough to hold a0-a7.
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  int FI;

  // If all registers are allocated, then all varargs must be passed on the
  // stack and we don't need to save any argregs.
  if (VarArgsSaveSize == 0) {
    int VaArgOffset = Assigner.StackSize;
    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
  } else {
    int VaArgOffset = -VarArgsSaveSize;
    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes,
                            VaArgOffset - static_cast<int>(XLenInBytes), true);
      VarArgsSaveSize += XLenInBytes;
    }

    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
                                Subtarget.getXLen());
    const LLT sXLen = LLT::scalar(Subtarget.getXLen());

    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
    auto Offset = MIRBuilder.buildConstant(
        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    const MVT XLenVT = Subtarget.getXLenVT();
    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
      Handler.assignValueToReg(
          VReg, ArgRegs[I],
          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
                              ArgRegs[I], XLenVT, CCValAssign::Full));
      auto MPO =
          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
      MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }

  // Record the frame index of the first variable argument, which is a value
  // needed by G_VASTART.
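  // (The legalizer later lowers G_VASTART to a store of this frame index's
  // address into the va_list object.)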
  RVFI->setVarArgsFrameIndex(FI);
  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
}

bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                             const Function &F,
                                             ArrayRef<ArrayRef<Register>> VRegs,
                                             FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();

  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
                                 /*IsLowerArgs=*/true))
      return false;
  }

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = F.getCallingConv();

  SmallVector<ArgInfo, 32> SplitArgInfos;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgInfos, FLI.DemoteRegister, MRI, DL);

  unsigned Index = 0;
  for (auto &Arg : F.args()) {
    // Construct the ArgInfo object from destination register and argument
    // type.
    ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);

    // Handle any required merging from split value types from physical
    // registers into the desired VReg. ArgInfo objects are constructed
    // correspondingly and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);

    ++Index;
  }

  RISCVIncomingValueAssigner Assigner(CC == CallingConv::Fast ? CC_RISCV_FastCC
                                                              : CC_RISCV,
                                      /*IsRet=*/false);
  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg())
    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);

  return true;
}

bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                  CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &AInfo : Info.OrigArgs) {
    if (!isSupportedArgumentType(AInfo.Ty, Subtarget))
      return false;
    if (AInfo.Flags[0].isByVal())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() &&
      !isSupportedReturnType(Info.OrigRet.Ty, Subtarget))
    return false;

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);

  SmallVector<ArgInfo, 32> SplitArgInfos;
  for (auto &AInfo : Info.OrigArgs) {
    // Handle any required unmerging of split value types from a given VReg
    // into physical registers. ArgInfo objects are constructed correspondingly
    // and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
  }

  // TODO: Support tail calls.
  Info.IsTailCall = false;

  // Select the recommended relocation type R_RISCV_CALL_PLT.
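  // For direct calls, PseudoCALL later expands to an auipc+jalr pair, and the
  // MO_CALL flag on the callee operand requests that relocation.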
  if (!Info.Callee.isReg())
    Info.Callee.setTargetFlags(RISCVII::MO_CALL);

  MachineInstrBuilder Call =
      MIRBuilder
          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
                                                  : RISCV::PseudoCALL)
          .add(Info.Callee);
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  RISCVOutgoingValueAssigner ArgAssigner(
      CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
      /*IsRet=*/false);
  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  MIRBuilder.insertInstr(Call);

  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(0);

  // If the callee is a register, it is used by a target-specific instruction
  // and must have a register class matching the constraint of that
  // instruction.
  if (Call->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
                             *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *Call,
                             Call->getDesc(), Call->getOperand(0), 0);

  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    SmallVector<ArgInfo, 4> SplitRetInfos;
    splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);

    RISCVIncomingValueAssigner RetAssigner(
        CC == CallingConv::Fast ? CC_RISCV_FastCC : CC_RISCV,
        /*IsRet=*/true);
    RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
    if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                       MIRBuilder, CC, Info.IsVarArg))
      return false;
  }

  if (!Info.CanLowerReturn)
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);

  return true;
}