Lines Matching defs:VA
722 SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
727 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
738 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
768 CCValAssign &VA = RVLocs[I];
769 assert(VA.isRegLoc() && "Can only return in registers!");
773 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
779 if (VA.getLocInfo() == CCValAssign::SExt)
780 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
781 else if (VA.getLocInfo() == CCValAssign::ZExt)
782 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
783 else if (VA.getLocInfo() == CCValAssign::AExt) {
785 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
787 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
789 else if (VA.getLocInfo() == CCValAssign::BCvt)
790 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
792 assert(VA.getLocInfo() != CCValAssign::FPExt &&
797 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
799 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
801 X86::FR64XRegClass.contains(VA.getLocReg()) &&
806 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
811 if (VA.getLocReg() == X86::FP0 ||
812 VA.getLocReg() == X86::FP1) {
815 if (isScalarFPTypeInSSEReg(VA.getValVT()))
817 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
826 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
838 if (VA.needsCustom()) {
839 assert(VA.getValVT() == MVT::v64i1 &&
842 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
849 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
1006 /// \param VA The current 32 bit value that needs to be assigned.
1014 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
1020 assert(VA.getValVT() == MVT::v64i1 &&
1022 assert(NextVA.getValVT() == VA.getValVT() &&
1024 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
1037 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1045 DAG.getCopyFromReg(Root, DL, VA.getLocReg(), MVT::i32, *InGlue);
1117 CCValAssign &VA = RVLocs[I];
1118 EVT CopyVT = VA.getLocVT();
1123 for (MCPhysReg SubReg : TRI->subregs_inclusive(VA.getLocReg()))
1129 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
1131 if (VA.getLocReg() == X86::XMM1)
1132 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
1134 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
1136 X86::FR64XRegClass.contains(VA.getLocReg()) &&
1139 if (VA.getLocReg() == X86::XMM1)
1140 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
1142 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
1148 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
1149 isScalarFPTypeInSSEReg(VA.getValVT())) {
1153 RoundAfterCopy = (CopyVT != VA.getLocVT());
1157 if (VA.needsCustom()) {
1158 assert(VA.getValVT() == MVT::v64i1 &&
1161 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InGlue);
1163 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InGlue)
1170 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1174 if (VA.isExtInLoc()) {
1175 if (VA.getValVT().isVector() &&
1176 VA.getValVT().getScalarType() == MVT::i1 &&
1177 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
1178 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
1180 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
1182 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
1185 if (VA.getLocInfo() == CCValAssign::BCvt)
1186 Val = DAG.getBitcast(VA.getValVT(), Val);
1304 const CCValAssign &VA,
1318 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
1319 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
1321 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
1322 ValVT = VA.getLocVT();
1324 ValVT = VA.getValVT();
1336 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
1346 bool ScalarizedVector = ArgVT.isVector() && !VA.getLocVT().isVector();
1353 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
1361 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
1373 int64_t PartBegin = VA.getLocMemOffset();
1393 VA.getLocMemOffset(), isImmutable);
1396 if (VA.getLocInfo() == CCValAssign::ZExt) {
1398 } else if (VA.getLocInfo() == CCValAssign::SExt) {
1412 ? (VA.getValVT().isVector()
1413 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
1414 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
1723 CCValAssign &VA = ArgLocs[I];
1725 if (VA.isRegLoc()) {
1726 EVT RegVT = VA.getLocVT();
1727 if (VA.needsCustom()) {
1729 VA.getValVT() == MVT::v64i1 &&
1735 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
1777 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1784 if (VA.getLocInfo() == CCValAssign::SExt)
1786 DAG.getValueType(VA.getValVT()));
1787 else if (VA.getLocInfo() == CCValAssign::ZExt)
1789 DAG.getValueType(VA.getValVT()));
1790 else if (VA.getLocInfo() == CCValAssign::BCvt)
1791 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
1793 if (VA.isExtInLoc()) {
1795 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
1796 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
1797 else if (VA.getValVT().isVector() &&
1798 VA.getValVT().getScalarType() == MVT::i1 &&
1799 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
1800 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
1802 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
1804 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1807 assert(VA.isMemLoc());
1809 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
1813 if (VA.getLocInfo() == CCValAssign::Indirect &&
1814 !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
1816 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
1934 const CCValAssign &VA,
1937 unsigned LocMemOffset = VA.getLocMemOffset();
2169 CCValAssign &VA = ArgLocs[I];
2170 EVT RegVT = VA.getLocVT();
2175 switch (VA.getLocInfo()) {
2216 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2227 if (VA.needsCustom()) {
2228 assert(VA.getValVT() == MVT::v64i1 &&
2231 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
2232 } else if (VA.isRegLoc()) {
2233 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2236 CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), I);
2241 switch (VA.getLocReg()) {
2251 assert(VA.isMemLoc());
2256 dl, DAG, VA, Flags, isByVal));
2341 CCValAssign &VA = ArgLocs[I];
2343 if (VA.isRegLoc()) {
2344 if (VA.needsCustom()) {
2355 assert(VA.isMemLoc());
2362 int32_t Offset = VA.getLocMemOffset()+FPDiff;
2363 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2369 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
2653 const X86InstrInfo *TII, const CCValAssign &VA) {
2731 if (VA.getLocVT().getFixedSizeInBits() >
2823 for (const auto &VA : ArgLocs)
2824 if (!VA.isRegLoc())
2842 for (const auto &VA : RVLocs) {
2843 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
2880 const CCValAssign &VA = ArgLocs[I];
2883 if (VA.getLocInfo() == CCValAssign::Indirect)
2885 if (!VA.isRegLoc()) {
2886 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
2887 TII, VA))
2907 for (const auto &VA : ArgLocs) {
2908 if (!VA.isRegLoc())
2910 Register Reg = VA.getLocReg();
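
All of the matches above sit inside the CCValAssign-driven loops of LLVM's X86 calling-convention lowering (return lowering, formal-argument lowering, call lowering, and the v64i1 split helpers). As a reading aid, here is a minimal sketch of the generic pattern those lines instantiate, built only from the public CCValAssign / SelectionDAG API. The helper names adaptValueToLoc and assignOutgoingArgs are hypothetical and are not functions from the matched file; the real code additionally handles needsCustom() values (the v64i1 mask split), FP0/FP1 returns, and indirect locations.

// Sketch under the assumptions stated above; not code from the matched file.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <utility>

using namespace llvm;

// Convert a value from its source type (VA.getValVT()) into the form the
// calling convention assigned to it (VA.getLocVT()), mirroring the
// SExt/ZExt/AExt/BCvt dispatch visible in the matches around lines 779-790.
static SDValue adaptValueToLoc(SelectionDAG &DAG, const SDLoc &dl,
                               const CCValAssign &VA, SDValue Val) {
  switch (VA.getLocInfo()) {
  case CCValAssign::Full:
    return Val; // Already has the location type.
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val);
  case CCValAssign::BCvt:
    return DAG.getBitcast(VA.getLocVT(), Val); // Same size, different type.
  default:
    llvm_unreachable("LocInfo not handled in this sketch");
  }
}

// Walk the assignments produced by CCState and route each value either to a
// register (collected in RegsToPass) or to a stack slot, as the register/
// memory branches around lines 2232 and 2251 do.
static void assignOutgoingArgs(
    SelectionDAG &DAG, const SDLoc &dl, ArrayRef<CCValAssign> ArgLocs,
    ArrayRef<SDValue> OutVals,
    SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass) {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &VA = ArgLocs[I];
    SDValue Arg = adaptValueToLoc(DAG, dl, VA, OutVals[I]);
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc() && "expected a stack location");
      // A real lowering stores Arg at VA.getLocMemOffset() relative to the
      // stack pointer (compare the LocMemOffset use near line 1937 above).
      (void)VA.getLocMemOffset();
    }
  }
}

The inverse direction (formal arguments and call results, the matches near lines 1117-1186 and 1723-1816) follows the same shape: copy from VA.getLocReg() or load from a fixed stack object at VA.getLocMemOffset(), then undo the extension or bitcast to recover VA.getValVT().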