Lines Matching defs:VA

714     SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
719 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
730 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
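
The hits above come from the helper that splits a v64i1 mask argument across two 32-bit registers (VA and NextVA, asserted register-located at line 719); line 730 pushes the low half, and a matching push for the high half follows it. A minimal sketch of that split, using plain integers as hypothetical stand-ins for SDValue since the real code builds SelectionDAG nodes:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical stand-ins: Register is just an ID, and a uint64_t models a
    // v64i1 mask that has already been bitcast to i64.
    using Register = unsigned;

    // Mirror of the Lo/Hi push_back pattern at line 730: the low 32 bits go
    // to the register assigned by VA, the high 32 bits to the one from NextVA.
    static void passV64i1InRegs(
        std::vector<std::pair<Register, uint64_t>> &RegsToPass,
        Register LoReg, Register HiReg, uint64_t Mask) {
      RegsToPass.push_back({LoReg, Mask & 0xFFFFFFFFu});
      RegsToPass.push_back({HiReg, Mask >> 32});
    }
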
760 CCValAssign &VA = RVLocs[I];
761 assert(VA.isRegLoc() && "Can only return in registers!");
765 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
771 if (VA.getLocInfo() == CCValAssign::SExt)
772 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
773 else if (VA.getLocInfo() == CCValAssign::ZExt)
774 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
775 else if (VA.getLocInfo() == CCValAssign::AExt) {
777 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
779 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
781 else if (VA.getLocInfo() == CCValAssign::BCvt)
782 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
784 assert(VA.getLocInfo() != CCValAssign::FPExt &&
789 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
791 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
793 X86::FR64XRegClass.contains(VA.getLocReg()) &&
798 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
803 if (VA.getLocReg() == X86::FP0 ||
804 VA.getLocReg() == X86::FP1) {
807 if (isScalarFPTypeInSSEReg(VA.getValVT()))
809 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
818 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
830 if (VA.needsCustom()) {
831 assert(VA.getValVT() == MVT::v64i1 &&
834 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
841 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
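
Lines 771-784 are the LocInfo dispatch in return lowering: the value is sign-, zero-, or any-extended (or bitcast) to its location type before being copied into the return register, and FPExt is asserted never to occur. A standalone model of that dispatch for an i8-to-i32 promotion, with a simplified enum standing in for CCValAssign::LocInfo:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for CCValAssign::LocInfo.
    enum class LocInfo { Full, SExt, ZExt, AExt, BCvt, FPExt };

    // Widen an i8 value to its i32 location the way the return lowering does.
    // AExt leaves the upper bits unspecified, so zero-extending is a valid
    // choice; BCvt reinterprets bits, a no-op in this integer-only model.
    static uint32_t extendToLoc(int8_t Val, LocInfo LI) {
      assert(LI != LocInfo::FPExt && "FPExt is not expected when returning");
      switch (LI) {
      case LocInfo::SExt:
        return static_cast<uint32_t>(static_cast<int32_t>(Val));
      case LocInfo::ZExt:
      case LocInfo::AExt:
      case LocInfo::BCvt:
      default:
        return static_cast<uint8_t>(Val);
      }
    }
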
998 /// \param VA The current 32-bit value that needs to be assigned.
1006 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
1012 assert(VA.getValVT() == MVT::v64i1 &&
1014 assert(NextVA.getValVT() == VA.getValVT() &&
1016 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
1029 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1037 DAG.getCopyFromReg(Root, DL, VA.getLocReg(), MVT::i32, *InGlue);
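
getv64i1Argument (lines 1006-1037) is the inverse of the split above: it reads two 32-bit registers, the second through a glued CopyFromReg (line 1037), and reassembles the v64i1 mask. The same reassembly in plain integers; the real code joins the halves as DAG nodes rather than with shifts:

    #include <cstdint>

    // Rebuild the 64-bit mask from the two register halves read at lines
    // 1029-1037.
    static uint64_t buildV64i1(uint32_t Lo, uint32_t Hi) {
      return (static_cast<uint64_t>(Hi) << 32) | Lo;
    }
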
1109 CCValAssign &VA = RVLocs[I];
1110 EVT CopyVT = VA.getLocVT();
1115 for (MCPhysReg SubReg : TRI->subregs_inclusive(VA.getLocReg()))
1121 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
1123 if (VA.getLocReg() == X86::XMM1)
1124 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
1126 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
1128 X86::FR64XRegClass.contains(VA.getLocReg()) &&
1131 if (VA.getLocReg() == X86::XMM1)
1132 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
1134 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
1140 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
1141 isScalarFPTypeInSSEReg(VA.getValVT())) {
1145 RoundAfterCopy = (CopyVT != VA.getLocVT());
1149 if (VA.needsCustom()) {
1150 assert(VA.getValVT() == MVT::v64i1 &&
1153 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InGlue);
1155 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InGlue)
1162 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1166 if (VA.isExtInLoc()) {
1167 if (VA.getValVT().isVector() &&
1168 VA.getValVT().getScalarType() == MVT::i1 &&
1169 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
1170 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
1172 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
1174 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
1177 if (VA.getLocInfo() == CCValAssign::BCvt)
1178 Val = DAG.getBitcast(VA.getValVT(), Val);
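
Lines 1166-1174 handle call results that were extended into their location: an i1-vector result returned in a plain integer register is converted back with lowerRegToMasks, and anything else is truncated to its original value type. A self-contained model for an 8-lane mask returned in i8:

    #include <bitset>
    #include <cstdint>

    // Stand-in for lowerRegToMasks at line 1172: reinterpret the integer
    // register as one bit per vector lane.
    static std::bitset<8> regToMask(uint8_t LocVal) {
      return std::bitset<8>(LocVal);
    }

    // Stand-in for the ISD::TRUNCATE at line 1174: drop the extension bits.
    static uint8_t truncateToVal(uint32_t LocVal) {
      return static_cast<uint8_t>(LocVal);
    }
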
1296 const CCValAssign &VA,
1310 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
1311 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
1313 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
1314 ValVT = VA.getLocVT();
1316 ValVT = VA.getValVT();
1328 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
1338 bool ScalarizedVector = ArgVT.isVector() && !VA.getLocVT().isVector();
1345 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
1353 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
1365 int64_t PartBegin = VA.getLocMemOffset();
1385 VA.getLocMemOffset(), isImmutable);
1388 if (VA.getLocInfo() == CCValAssign::ZExt) {
1390 } else if (VA.getLocInfo() == CCValAssign::SExt) {
1404 ? (VA.getValVT().isVector()
1405 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
1406 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
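
This cluster is from memory-argument lowering. Lines 1310-1316 compute ExtendedInMem (an i1 vector that was widened before being stored) and pick the type to load: Indirect arguments and in-memory extensions are loaded as the location type, everything else as the value type. The predicate and the choice, modeled directly:

    // Hypothetical mirror of the ExtendedInMem test at lines 1310-1311: the
    // value is an i1 vector whose natural width differs from its location's.
    static bool extendedInMem(bool IsExtInLoc, bool ScalarIsI1,
                              unsigned ValBits, unsigned LocBits) {
      return IsExtInLoc && ScalarIsI1 && ValBits != LocBits;
    }

    // Mirror of the type choice at lines 1313-1316.
    enum class LoadType { ValVT, LocVT };
    static LoadType pickLoadType(bool IsIndirect, bool ExtendedInMem) {
      return (IsIndirect || ExtendedInMem) ? LoadType::LocVT : LoadType::ValVT;
    }
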
1715 CCValAssign &VA = ArgLocs[I];
1717 if (VA.isRegLoc()) {
1718 EVT RegVT = VA.getLocVT();
1719 if (VA.needsCustom()) {
1721 VA.getValVT() == MVT::v64i1 &&
1727 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
1769 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1776 if (VA.getLocInfo() == CCValAssign::SExt)
1778 DAG.getValueType(VA.getValVT()));
1779 else if (VA.getLocInfo() == CCValAssign::ZExt)
1781 DAG.getValueType(VA.getValVT()));
1782 else if (VA.getLocInfo() == CCValAssign::BCvt)
1783 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
1785 if (VA.isExtInLoc()) {
1787 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
1788 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
1789 else if (VA.getValVT().isVector() &&
1790 VA.getValVT().getScalarType() == MVT::i1 &&
1791 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
1792 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
1794 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
1796 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1799 assert(VA.isMemLoc());
1801 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
1805 if (VA.getLocInfo() == CCValAssign::Indirect &&
1806 !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
1808 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
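
In formal-argument lowering, lines 1776-1781 wrap a promoted live-in with AssertSext/AssertZext: the node records the caller's promise that the register was already extended from ValVT, which makes the later TRUNCATE (line 1796) lossless. That promise, expressed as runtime asserts on a hypothetical i8-in-i32 argument:

    #include <cassert>
    #include <cstdint>

    // AssertZext analogue: the upper 24 bits must already be zero.
    static uint8_t readZExtArg(uint32_t Reg) {
      assert((Reg >> 8) == 0 && "AssertZext violated: upper bits set");
      return static_cast<uint8_t>(Reg); // the truncate at line 1796 is free
    }

    // AssertSext analogue: the value must already be sign-extended from i8.
    static int8_t readSExtArg(int32_t Reg) {
      assert(Reg == static_cast<int8_t>(Reg) && "AssertSext violated");
      return static_cast<int8_t>(Reg);
    }
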
1926 const CCValAssign &VA,
1929 unsigned LocMemOffset = VA.getLocMemOffset();
2161 CCValAssign &VA = ArgLocs[I];
2162 EVT RegVT = VA.getLocVT();
2167 switch (VA.getLocInfo()) {
2208 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2219 if (VA.needsCustom()) {
2220 assert(VA.getValVT() == MVT::v64i1 &&
2223 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
2224 } else if (VA.isRegLoc()) {
2225 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2228 CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), I);
2233 switch (VA.getLocReg()) {
2243 assert(VA.isMemLoc());
2248 dl, DAG, VA, Flags, isByVal));
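
Within call lowering, lines 2219-2243 dispatch each outgoing argument three ways: needsCustom (the v64i1 split from line 714), a plain register copy, or a memory store whose offset handling appears at lines 1926-1929. The dispatch shape, reduced to a standalone classifier with illustrative names:

    // Three-way disposition of one outgoing argument, mirroring lines
    // 2219-2243; names are illustrative, not LLVM's.
    enum class ArgDisposition { SplitV64i1, Register, Memory };
    static ArgDisposition classifyArg(bool NeedsCustom, bool IsRegLoc) {
      if (NeedsCustom)
        return ArgDisposition::SplitV64i1; // Passv64i1ArgInRegs path
      return IsRegLoc ? ArgDisposition::Register : ArgDisposition::Memory;
    }
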
2333 CCValAssign &VA = ArgLocs[I];
2335 if (VA.isRegLoc()) {
2336 if (VA.needsCustom()) {
2347 assert(VA.isMemLoc());
2354 int32_t Offset = VA.getLocMemOffset()+FPDiff;
2355 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2361 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
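
For tail calls, arguments that must stay on the stack are re-addressed against the adjusted frame: line 2354 offsets the assigned slot by FPDiff, and line 2355 rounds the location width up to whole bytes. That arithmetic in standalone form:

    #include <cstdint>

    // Slot address relative to the adjusted frame (line 2354).
    static int32_t tailCallArgOffset(uint32_t LocMemOffset, int32_t FPDiff) {
      return static_cast<int32_t>(LocMemOffset) + FPDiff;
    }

    // Store size in bytes, rounding bits up (line 2355).
    static uint32_t opSizeBytes(uint32_t LocSizeInBits) {
      return (LocSizeInBits + 7) / 8;
    }
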
2634 const X86InstrInfo *TII, const CCValAssign &VA) {
2712 if (VA.getLocVT().getFixedSizeInBits() >
2804 for (const auto &VA : ArgLocs)
2805 if (!VA.isRegLoc())
2823 for (const auto &VA : RVLocs) {
2824 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
2854 const CCValAssign &VA = ArgLocs[I];
2857 if (VA.getLocInfo() == CCValAssign::Indirect)
2859 if (!VA.isRegLoc()) {
2860 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
2861 TII, VA))
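
MatchingStackOffset (declared with VA at line 2634) decides whether a tail call can reuse the caller's incoming stack slot for an outgoing argument, and the loop at lines 2854-2861 applies it to every memory-located argument. The comparison at line 2712 rejects reuse when the outgoing location type is wider than the existing slot; a hypothetical reduction of that check:

    // Reuse of an incoming stack slot is only safe if the outgoing location
    // fits inside the existing object (cf. line 2712).
    static bool locFitsSlot(unsigned LocSizeInBits, unsigned SlotSizeInBits) {
      return LocSizeInBits <= SlotSizeInBits;
    }
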
2881 for (const auto &VA : ArgLocs) {
2882 if (!VA.isRegLoc())
2884 Register Reg = VA.getLocReg();
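
The eligibility logic also scans the assignment lists directly: lines 2804-2805 look for any argument that landed in memory, lines 2823-2824 inspect return locations for the x87 stack registers FP0/FP1, and lines 2881-2884 pull out the physical register of each register-located argument for further checking. A combined sketch of the argument scans, with the per-register check left as a caller-supplied predicate since its exact criteria are not visible in these hits:

    #include <vector>

    // Minimal stand-in for CCValAssign for this sketch.
    struct Loc {
      bool IsRegLoc;
      unsigned Reg; // physical register, valid when IsRegLoc is true
    };

    // True if every argument is in a register and each register passes the
    // caller-supplied check (the real criteria live outside these lines).
    template <typename Pred>
    static bool argsEligible(const std::vector<Loc> &ArgLocs, Pred RegOk) {
      for (const Loc &VA : ArgLocs) {
        if (!VA.IsRegLoc)
          return false;     // mirrors the memory scan at lines 2804-2805
        if (!RegOk(VA.Reg)) // mirrors the register pull at line 2884
          return false;
      }
      return true;
    }
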