Lines Matching +full:tri +full:- +full:state
1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
51 #define DEBUG_TYPE "aarch64-call-lowering"
64 // hack because the DAG calls the assignment function with pre-legalized
93 CCState &State) override {
96 LocInfo, Info, Flags, State);
119 CCState &State) override {
120 const Function &F = State.getMachineFunction().getFunction();
122 Subtarget.isCallingConvWin64(State.getCallingConv(), F.isVarArg());
123 bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
129 Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
131 Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
133 StackSize = State.getStackSize();
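
The hits at lines 119-133 come from the outgoing-value assigner wrapper: on Win64, even the fixed arguments of a variadic call are assigned with the varargs rules, and the final stack size is read back off the CCState. A minimal sketch of that dispatch, assuming the AssignFn/AssignFnVarArg/StackSize members and Subtarget reference from the surrounding assigner class:

    // Sketch of the assignment dispatch shown above (member names assumed
    // from the surrounding assigner class).
    bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy Flags,
                   CCState &State) {
      const Function &F = State.getMachineFunction().getFunction();
      bool IsCalleeWin =
          Subtarget.isCallingConvWin64(State.getCallingConv(), F.isVarArg());
      // Win64 assigns even the fixed arguments of a variadic call with the
      // varargs rules.
      bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

      bool Res = UseVarArgsCCForFixed
                     ? AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State)
                     : AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
      // Remember how much stack the convention consumed.
      StackSize = State.getStackSize();
      return Res;
    }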
208 /// parameters (it's a basic-block live-in), and a call instruction
209 /// (it's an implicit-def of the BL).
218 MIRBuilder.getMRI()->addLiveIn(PhysReg);
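
Lines 208-218 describe how an incoming physical register is recorded, and the two handler subclasses differ exactly as the comment says. A hedged sketch of the pair of overrides (handler-class context assumed):

    // Formal-argument handler: the physreg is a live-in of the entry block.
    void markPhysRegUsed(MCRegister PhysReg) override {
      MIRBuilder.getMRI()->addLiveIn(PhysReg);
      MIRBuilder.getMBB().addLiveIn(PhysReg);
    }

    // Call-return handler: the physreg is an implicit def of the BL itself.
    void markPhysRegUsed(MCRegister PhysReg) override {
      MIB.addDef(PhysReg, RegState::Implicit);
    }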
368 insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
378 LLVMContext &Ctx = Val->getType()->getContext();
381 ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
414 // padding with more elements, e.g. <2 x half> -> <4 x half>.
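
Lines 368-414 are from lowerReturn: demoted sret returns are stored through the hidden pointer, while ordinary return values are split into EVTs, one vreg per piece, and small vectors may be padded with undef elements (e.g. <2 x half> to <4 x half>) so they fill a legal register. A fragment of the split step, following the hits above:

    // One EVT (and one vreg) per split piece of the IR return type.
    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");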
512 FuncInfo->getForwardedMustTailRegParms();
531 if (!EnableSVEGISel && (F.getReturnType()->isScalableTy() ||
533 return A.getType()->isScalableTy();
538 LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
568 unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;
570 unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
575 -static_cast<int>(GPRSaveSize), false);
578 MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
579 -static_cast<int>(alignTo(GPRSaveSize, 16)),
595 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
603 FuncInfo->setVarArgsGPRIndex(GPRIdx);
604 FuncInfo->setVarArgsGPRSize(GPRSaveSize);
609 unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
633 FuncInfo->setVarArgsFPRIndex(FPRIdx);
634 FuncInfo->setVarArgsFPRSize(FPRSaveSize);
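
Lines 568-634 size the varargs register save areas: 8 bytes per remaining GPR, 16 per remaining FPR, plus a pad object that keeps the Win64 GPR area 16-byte aligned. A standalone check of that arithmetic with made-up register counts:

    #include <cassert>
    #include <cstdio>

    // Power-of-two round-up, standing in for llvm::alignTo.
    static unsigned alignTo(unsigned Size, unsigned Align) {
      return (Size + Align - 1) & ~(Align - 1);
    }

    int main() {
      // Illustrative: 8 arg GPRs (X0-X7), first unnamed argument in X3.
      unsigned NumGPRArgRegs = 8, FirstVariadicGPR = 3;
      unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR); // 40
      unsigned Pad = 16 - (GPRSaveSize & 15);                        // 8
      int PadOffset = -static_cast<int>(alignTo(GPRSaveSize, 16));   // -48

      // 8 FP arg registers (Q0-Q7), first unnamed FP argument in Q2.
      unsigned NumFPRArgRegs = 8, FirstVariadicFPR = 2;
      unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); // 96

      std::printf("GPR save %u, pad %u at %d, FPR save %u\n", GPRSaveSize,
                  Pad, PadOffset, FPRSaveSize);
      assert(GPRSaveSize == 40 && Pad == 8 && PadOffset == -48 &&
             FPRSaveSize == 96);
      return 0;
    }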
678 // i1 arguments are zero-extended to i8 by the caller. Emit a
680 if (OrigArg.Ty->isIntegerTy(1)) {
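
Lines 678-680 are the callee side of the i1 rule: the caller already zero-extended the bit to i8, so the argument can be received as s8 and reconciled with the s1 vreg afterwards. A hedged sketch of that handling; BoolArgs is an assumed name for the local fixup worklist:

    if (OrigArg.Ty->isIntegerTy(1)) {
      assert(OrigArg.Regs.size() == 1 &&
             MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      auto &Flags = OrigArg.Flags[0];
      if (!Flags.isZExt() && !Flags.isSExt()) {
        // Receive the value as s8 and remember the original s1 vreg for a
        // later assert-zext + truncate (BoolArgs is an assumed worklist).
        Register OrigReg = OrigArg.Regs[0];
        Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
        OrigArg.Regs[0] = WideReg;
        BoolArgs.push_back({OrigReg, WideReg});
      }
    }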
696 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
732 // The AAPCS variadic function ABI is identical to the non-variadic
742 // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
746 FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
751 // We have a non-standard ABI, so why not make full use of the stack that
757 FuncInfo->setArgumentStackToRestore(StackSize);
767 FuncInfo->setBytesInStackArgArea(StackSize);
770 Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
842 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
843 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
844 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
846 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
847 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
850 return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
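
Lines 842-850 are the callee-saved-register leg of the tail-call check: the callee's convention must preserve everything the caller's does. Consolidated into one hedged helper (the helper name and the hasCustomCallingConv guard are assumptions):

    static bool calleePreservesCallerRegs(MachineFunction &MF,
                                          CallingConv::ID CallerCC,
                                          CallingConv::ID CalleeCC) {
      const AArch64Subtarget &ST = MF.getSubtarget<AArch64Subtarget>();
      const AArch64RegisterInfo *TRI = ST.getRegisterInfo();
      const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
      const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);

      // Custom calling conventions (e.g. from attributes) may shrink the
      // masks first.
      if (ST.hasCustomCallingConv()) {
        TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
        TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
      }

      // Every register preserved across the caller must also be preserved
      // across the callee.
      return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
    }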
887 if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
892 // Verify that the parameters in callee-saved registers match.
895 auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
896 const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
925 // Must pass all target-independent checks in order to tail call optimize.
956 // On Windows, "inreg" attributes signify non-aggregate indirect returns.
974 // Externally-defined functions with weak linkage should not be
975 // tail-called on AArch64 when the OS does not support dynamic
976 // pre-emption of symbols, as the AAELF spec requires normal calls
979 // situation (as used for tail calls) is implementation-defined, so we
984 if (GV->hasExternalWeakLinkage() &&
987 LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
993 // If we have -tailcallopt, then we're done.
997 // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
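
Lines 974-997 explain why sibcalls to weak external symbols are refused: AAELF only pins down the behaviour of normal calls (BL) to undefined weak functions, not branches. A hedged sketch of the guard matching the debug message above; the exact object-format predicate in tree may differ:

    if (Info.Callee.isGlobal()) {
      const GlobalValue *GV = Info.Callee.getGlobal();
      const Triple &TT = MF.getTarget().getTargetTriple();
      if (GV->hasExternalWeakLinkage() &&
          (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
           TT.isOSBinFormatMachO())) {
        // A branch (B) to an undefined weak symbol is implementation-defined
        // per AAELF; only BL is guaranteed to be fixed up by the linker.
        LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                             "with weak linkage for this OS.\n");
        return false;
      }
    }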
1033 assert((PAI->Key == AArch64PACKey::IA || PAI->Key == AArch64PACKey::IB) &&
1043 if (FuncInfo->branchTargetEnforcement()) {
1044 if (FuncInfo->branchProtectionPAuthLR()) {
1045 assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
1053 if (FuncInfo->branchProtectionPAuthLR()) {
1054 assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
1066 const AArch64RegisterInfo &TRI, MachineFunction &MF) {
1069 // For 'this' returns, use the X0-preserving mask if applicable
1070 Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
1073 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
1076 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
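
Lines 1066-1076 select the register mask for the call: a 'this'-returning call prefers the X0-preserving variant when the target provides one, otherwise the returned-argument marking is dropped. A sketch of the body, with the parameters taken from the signature hit at line 1066:

    const uint32_t *Mask;
    if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
      // For 'this' returns, use the X0-preserving mask if applicable.
      Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
      if (!Mask) {
        // No such mask for this convention: fall back, and stop treating
        // the argument as returned.
        OutArgs[0].Flags[0].setReturned(false);
        Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
      }
    } else {
      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
    }
    return Mask;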
1090 // True when we're tail calling, but without -tailcallopt.
1111 auto TRI = Subtarget.getRegisterInfo();
1119 assert((Info.PAI->Key == AArch64PACKey::IA ||
1120 Info.PAI->Key == AArch64PACKey::IB) &&
1122 MIB.addImm(Info.PAI->Key);
1127 extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);
1132 MIB->getOperand(4).setReg(constrainOperandRegClass(
1133 MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1134 *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
1135 MIB->getOperand(4), 4));
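
Lines 1119-1135 attach the pointer-auth operands to the tail-call pseudo: the key (IA or IB only), then the discriminator split into an immediate part and an address part, whose register operand is constrained afterwards. A hedged fragment of the splitting step; the pair order is assumed from the pseudo's operand list:

    // Split a blend(addr, imm) discriminator into the pseudo's immediate
    // discriminator + address-discriminator register operands.
    auto [IntDisc, AddrDisc] =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);
    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);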
1140 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
1142 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1146 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1148 if (TRI->isAnyArgRegReserved(MF))
1149 TRI->emitReservedArgRegCallError(MF);
1159 // by -tailcallopt. For sibcalls, the memory operands for the call are
1166 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1176 // keep it 16-byte aligned.
1182 FPDiff = NumReusableBytes - NumBytes;
1186 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
1187 FuncInfo->setTailCallReservedStack(-FPDiff);
1189 // The stack pointer must be 16-byte aligned at all times it's used for a
1192 // a 16-byte aligned SP and the delta applied for the tail call should
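
Lines 1159-1192 compute FPDiff, the byte delta between the caller's reusable incoming-argument area and what the tail-callee needs; both quantities stay 16-byte aligned, and a negative delta grows the reserved tail-call stack. A standalone check of that arithmetic with made-up sizes:

    #include <cassert>
    #include <cstdio>

    // Power-of-two round-up, standing in for llvm::alignTo.
    static unsigned alignTo(unsigned Size, unsigned Align) {
      return (Size + Align - 1) & ~(Align - 1);
    }

    int main() {
      // Illustrative: caller received 32 bytes of stack args; tail-callee
      // wants 41 bytes, rounded up so SP stays 16-byte aligned.
      unsigned NumReusableBytes = 32;
      unsigned NumBytes = alignTo(41, 16);                       // 48
      int FPDiff = (int)NumReusableBytes - (int)NumBytes;        // -16

      // A negative FPDiff must be covered by reserved tail-call stack.
      unsigned TailCallReservedStack = 0;
      if (FPDiff < 0 && TailCallReservedStack < (unsigned)-FPDiff)
        TailCallReservedStack = -FPDiff;

      std::printf("NumBytes=%u FPDiff=%d reserved=%u\n", NumBytes, FPDiff,
                  TailCallReservedStack);
      assert(NumBytes == 48 && FPDiff == -16 && TailCallReservedStack == 16);
      return 0;
    }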
1197 const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
1209 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1219 if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
1222 return TRI->regsOverlap(Use.getReg(), ForwardedReg);
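
Lines 1197-1222 forward the musttail register parameters: each forwarded physreg is re-attached to the tail call unless an outgoing argument copy already occupies (or overlaps) it; the any_of/regsOverlap test is that filter. A hedged sketch of the loop:

    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // Skip registers the argument copies already cover.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }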
1232 // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1235 MIB->getOperand(1).setImm(FPDiff);
1249 if (MIB->getOperand(0).isReg())
1250 constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1252 MIB->getDesc(), MIB->getOperand(0), 0);
1284 // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
1286 if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
1293 // zero-extend the argument to i32 instead of just i8.
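
Lines 1284-1293 are the caller side of the i1 rule: the zero-extension to 8 bits is built explicitly as a G_ZEXT to s8, because tagging the argument ZExt would widen it to i32 instead. A hedged sketch (Flags stands for the current argument's first flags entry):

    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // Widen to s8 by hand; a ZExt arg flag would go all the way to i32.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      OutArg.Ty = Type::getInt8Ty(OrigArg.Ty->getContext());
    }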
1302 if (!Info.OrigRet.Ty->isVoidTy())
1331 // Create a temporarily-floating call instruction so we can add the implicit
1342 else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
1344 MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1347 // For an intrinsic call (e.g. memset), use GOT if "RtLibUseGOT" (-fno-plt)
1349 if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {
1368 MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1375 const auto *TRI = Subtarget.getRegisterInfo();
1385 Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1388 assert((Info.PAI->Key == AArch64PACKey::IA ||
1389 Info.PAI->Key == AArch64PACKey::IB) &&
1391 MIB.addImm(Info.PAI->Key);
1396 extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);
1401 constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1403 MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
1410 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1413 if (TRI->isAnyArgRegReserved(MF))
1414 TRI->emitReservedArgRegCallError(MF);
1433 if (MIB->getOperand(CalleeOpNo).isReg())
1434 constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
1435 *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
1436 MIB->getOperand(CalleeOpNo), CalleeOpNo);
1438 // Finally we can copy the returned value back into its virtual-register. In
1440 // implicit-define of the call instruction.
1441 if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {