Lines matching defs:N (AArch64DAGToDAGISel, AArch64ISelDAGToDAG.cpp)

67 bool SelectRDVLImm(SDValue N, SDValue &Imm);
69 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
70 bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
71 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
72 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
73 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
74 return SelectShiftedRegister(N, false, Reg, Shift);
76 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, true, Reg, Shift);
79 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
80 return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
82 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
85 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
88 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
91 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
94 bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
97 bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
100 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeIndexed(N, 1, Base, OffImm);
103 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeIndexed(N, 2, Base, OffImm);
106 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeIndexed(N, 4, Base, OffImm);
109 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeIndexed(N, 8, Base, OffImm);
112 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
113 return SelectAddrModeIndexed(N, 16, Base, OffImm);
115 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
116 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
118 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
119 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
121 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
122 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
124 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
125 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
127 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
128 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
131 bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
134 bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
144 Base = N;
145 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
150 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
152 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
156 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
158 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
161 bool SelectExtractHigh(SDValue N, SDValue &Res) {
162 if (Subtarget->isLittleEndian() && N->getOpcode() == ISD::BITCAST)
163 N = N->getOperand(0);
164 if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
165 !isa<ConstantSDNode>(N->getOperand(1)))
167 EVT VT = N->getValueType(0);
168 EVT LVT = N->getOperand(0).getValueType();
169 unsigned Index = N->getConstantOperandVal(1);
173 Res = N->getOperand(0);
177 bool SelectRoundingVLShr(SDValue N, SDValue &Res1, SDValue &Res2) {
178 if (N.getOpcode() != AArch64ISD::VLSHR)
180 SDValue Op = N->getOperand(0);
182 unsigned ShtAmt = N->getConstantOperandVal(1);
202 Res2 = CurDAG->getTargetConstant(ShtAmt, SDLoc(N), MVT::i32);
206 bool SelectDupZeroOrUndef(SDValue N) {
207 switch(N->getOpcode()) {
212 auto Opnd0 = N->getOperand(0);
226 bool SelectDupZero(SDValue N) {
227 switch(N->getOpcode()) {
230 auto Opnd0 = N->getOperand(0);
242 bool SelectDupNegativeZero(SDValue N) {
243 switch(N->getOpcode()) {
246 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
255 bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
256 return SelectSVEAddSubImm(N, VT, Imm, Shift);
260 bool SelectSVEAddSubSSatImm(SDValue N, SDValue &Imm, SDValue &Shift) {
261 return SelectSVEAddSubSSatImm(N, VT, Imm, Shift, Negate);
265 bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
266 return SelectSVECpyDupImm(N, VT, Imm, Shift);
270 bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
271 return SelectSVELogicalImm(N, VT, Imm, Invert);
275 bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
276 return SelectSVEArithImm(N, VT, Imm);
280 bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
281 return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
284 bool SelectSVEShiftSplatImmR(SDValue N, SDValue &Imm) {
285 if (N->getOpcode() != ISD::SPLAT_VECTOR)
288 EVT EltVT = N->getValueType(0).getVectorElementType();
289 return SelectSVEShiftImm(N->getOperand(0), /* Low */ 1,
294 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
296 bool SelectCntImm(SDValue N, SDValue &Imm) {
297 if (!isa<ConstantSDNode>(N))
300 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
309 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
317 bool SelectEXTImm(SDValue N, SDValue &Imm) {
318 if (!isa<ConstantSDNode>(N))
321 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
325 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
333 bool ImmToReg(SDValue N, SDValue &Imm) {
334 if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
365 void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
367 bool tryIndexedLoad(SDNode *N);
369 void SelectPtrauthAuth(SDNode *N);
370 void SelectPtrauthResign(SDNode *N);
372 bool trySelectStackSlotTagP(SDNode *N);
373 void SelectTagP(SDNode *N);
375 void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
377 void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
379 void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
380 void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
381 void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
384 void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
387 void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
390 void SelectPExtPair(SDNode *N, unsigned Opc);
391 void SelectWhilePair(SDNode *N, unsigned Opc);
392 void SelectCVTIntrinsic(SDNode *N, unsigned NumVecs, unsigned Opcode);
393 void SelectClamp(SDNode *N, unsigned NumVecs, unsigned Opcode);
394 void SelectUnaryMultiIntrinsic(SDNode *N, unsigned NumOutVecs,
396 void SelectFrintFromVT(SDNode *N, unsigned NumVecs, unsigned Opcode);
399 void SelectMultiVectorMove(SDNode *N, unsigned NumVecs, unsigned BaseReg,
401 void SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
404 bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
407 bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
411 bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
412 return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
419 bool SelectSMETileSlice(SDValue N, SDValue &Vector, SDValue &Offset) {
420 return SelectSMETileSlice(N, MaxIdx, Vector, Offset, Scale);
423 void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
424 void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
425 void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
426 void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
427 void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
430 findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
434 bool tryBitfieldExtractOp(SDNode *N);
435 bool tryBitfieldExtractOpFromSExt(SDNode *N);
436 bool tryBitfieldInsertOp(SDNode *N);
437 bool tryBitfieldInsertInZeroOp(SDNode *N);
438 bool tryShiftAmountMod(SDNode *N);
440 bool tryReadRegister(SDNode *N);
441 bool tryWriteRegister(SDNode *N);
443 bool trySelectCastFixedLengthToScalableVector(SDNode *N);
444 bool trySelectCastScalableToFixedLengthVector(SDNode *N);
446 bool trySelectXAR(SDNode *N);
452 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
454 bool SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg, SDValue &Shift);
455 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
457 return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
459 bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
462 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
464 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
466 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
469 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
474 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
478 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
479 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
482 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
485 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos) {
486 return SelectCVTFixedPosRecipOperand(N, FixedPos, RegWidth);
489 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos,
492 bool SelectCMP_SWAP(SDNode *N);
494 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
495 bool SelectSVEAddSubSSatImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
497 bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
498 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
500 bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
501 bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
504 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
505 bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
507 bool SelectSMETileSlice(SDValue N, unsigned MaxSize, SDValue &Vector,
510 bool SelectAllActivePredicate(SDValue N);
511 bool SelectAnyPredicate(SDValue N);
530 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
531 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
540 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
541 return isIntImmediate(N.getNode(), Imm);
547 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
549 return N->getOpcode() == Opc &&
550 isIntImmediate(N->getOperand(1).getNode(), Imm);
553 // isIntImmediateEq - This method tests to see if N is a constant operand that
556 static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
558 if (!isIntImmediate(N.getNode(), Imm))
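The isIntImmediate / isOpcWithIntImmediate helpers above are the usual entry point for the matchers that follow: peel a constant off operand 1 of a node, then range-check it. A minimal usage sketch, with a hypothetical helper name and a generic bound (it is not a line from the file):

    // Illustrative sketch only: matches (srl X, C) with an in-range constant C.
    static bool matchShiftByConstant(SDValue N, unsigned &ShiftAmt) {
      uint64_t Imm = 0;
      // True iff N is an ISD::SRL whose second operand is a ConstantSDNode;
      // the constant's value is written into Imm.
      if (!isOpcWithIntImmediate(N.getNode(), ISD::SRL, Imm))
        return false;
      unsigned BitWidth = N.getValueSizeInBits();
      if (Imm >= BitWidth)
        return false;
      ShiftAmt = Imm;
      return true;
    }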
592 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
599 if (!isa<ConstantSDNode>(N.getNode()))
602 uint64_t Immed = N.getNode()->getAsZExtVal();
614 SDLoc dl(N);
622 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
629 if (!isa<ConstantSDNode>(N.getNode()))
633 uint64_t Immed = N.getNode()->getAsZExtVal();
641 if (N.getValueType() == MVT::i32)
649 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
655 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
656 switch (N.getOpcode()) {
726 bool AArch64DAGToDAGISel::SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg,
728 EVT VT = N.getValueType();
732 if (N->getOpcode() != ISD::AND || !N->hasOneUse())
734 SDValue LHS = N.getOperand(0);
747 ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N.getOperand(1));
756 unsigned BitWidth = N.getValueSizeInBits();
805 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
806 if (N.getOpcode() == ISD::SIGN_EXTEND ||
807 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
809 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
810 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
812 SrcVT = N.getOperand(0).getValueType();
823 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
824 N.getOpcode() == ISD::ANY_EXTEND) {
825 EVT SrcVT = N.getOperand(0).getValueType();
835 } else if (N.getOpcode() == ISD::AND) {
836 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
857 /// Add/Sub. LSL means we are folding into an `add w0, w1, w2, lsl #N`
882 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
884 if (SelectShiftedRegisterFromAnd(N, Reg, Shift))
887 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
893 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
894 unsigned BitSize = N.getValueSizeInBits();
898 Reg = N.getOperand(0);
899 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
900 return isWorthFoldingALU(N, true);
910 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
911 if (N.getValueType() == MVT::i32)
912 return N;
914 SDLoc dl(N);
915 return CurDAG->getTargetExtractSubreg(AArch64::sub_32, dl, MVT::i32, N);
918 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
920 bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
921 if (!isa<ConstantSDNode>(N))
924 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
928 Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
938 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
943 if (N.getOpcode() == ISD::SHL) {
944 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
951 Ext = getExtendTypeForNode(N.getOperand(0));
955 Reg = N.getOperand(0).getOperand(0);
957 Ext = getExtendTypeForNode(N);
961 Reg = N.getOperand(0);
965 auto isDef32 = [](SDValue N) {
966 unsigned Opc = N.getOpcode();
984 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
986 return isWorthFoldingALU(N);
991 bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
996 if (N.getOpcode() != ISD::SHL)
999 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
1007 Reg = N.getOperand(0);
1008 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
1010 return isWorthFoldingALU(N);
1018 static bool isWorthFoldingADDlow(SDValue N) {
1019 for (auto *Use : N->uses()) {
1046 bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
1050 SDLoc dl(N);
1053 if (N.getOpcode() == ISD::FrameIndex) {
1054 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1062 if (CurDAG->isBaseWithConstantOffset(N)) {
1063 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1071 Base = N.getOperand(0);
1086 Base = N.getOperand(0);
1101 Base = N;
1109 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
1111 SDLoc dl(N);
1114 if (N.getOpcode() == ISD::FrameIndex) {
1115 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1121 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
1123 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
1124 Base = N.getOperand(0);
1125 OffImm = N.getOperand(1);
1134 if (CurDAG->isBaseWithConstantOffset(N)) {
1135 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1139 Base = N.getOperand(0);
1152 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
1159 Base = N;
1169 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
1172 if (!CurDAG->isBaseWithConstantOffset(N))
1174 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1177 Base = N.getOperand(0);
1184 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
1191 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
1192 SDLoc dl(N);
1196 N);
1199 /// Check if the given SHL node (\p N), can be used to form an
1201 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
1204 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
1205 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
1209 SDLoc dl(N);
1212 getExtendTypeForNode(N.getOperand(0), true);
1216 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
1220 Offset = N.getOperand(0);
1230 return isWorthFoldingAddr(N, Size);
1233 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
1237 if (N.getOpcode() != ISD::ADD)
1239 SDValue LHS = N.getOperand(0);
1240 SDValue RHS = N.getOperand(1);
1241 SDLoc dl(N);
1251 const SDNode *Node = N.getNode();
1257 // Remember if it is worth folding N when it produces extended register.
1258 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);
1322 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
1326 if (N.getOpcode() != ISD::ADD)
1328 SDValue LHS = N.getOperand(0);
1329 SDValue RHS = N.getOperand(1);
1330 SDLoc DL(N);
1335 const SDNode *Node = N.getNode();
1366 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
1369 // Remember if it is worth folding N when it produces extended register.
1370 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);
1461 SDNode *N =
1463 return SDValue(N, 0);
1466 void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
1468 SDLoc dl(N);
1469 EVT VT = N->getValueType(0);
1475 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
1476 N->op_begin() + Vec0Off + NumVecs);
1481 Ops.push_back(N->getOperand(1));
1483 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
1484 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
1520 void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) {
1521 SDLoc DL(N);
1523 SDValue Val = N->getOperand(1);
1524 SDValue AUTKey = N->getOperand(2);
1525 SDValue AUTDisc = N->getOperand(3);
1539 ReplaceNode(N, AUT);
1543 void AArch64DAGToDAGISel::SelectPtrauthResign(SDNode *N) {
1544 SDLoc DL(N);
1546 SDValue Val = N->getOperand(1);
1547 SDValue AUTKey = N->getOperand(2);
1548 SDValue AUTDisc = N->getOperand(3);
1549 SDValue PACKey = N->getOperand(4);
1550 SDValue PACDisc = N->getOperand(5);
1573 ReplaceNode(N, AUTPAC);
1577 bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1578 LoadSDNode *LD = cast<LoadSDNode>(N);
1582 EVT DstVT = N->getValueType(0);
1649 SDLoc dl(N);
1656 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1671 ReplaceUses(SDValue(N, 0), LoadedVal);
1672 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1673 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1674 CurDAG->RemoveDeadNode(N);
1678 void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1680 SDLoc dl(N);
1681 EVT VT = N->getValueType(0);
1682 SDValue Chain = N->getOperand(0);
1684 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1692 ReplaceUses(SDValue(N, i),
1695 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1699 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(N)) {
1704 CurDAG->RemoveDeadNode(N);
1707 void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1709 SDLoc dl(N);
1710 EVT VT = N->getValueType(0);
1711 SDValue Chain = N->getOperand(0);
1713 SDValue Ops[] = {N->getOperand(1), // Mem operand
1714 N->getOperand(2), // Incremental
1723 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1728 ReplaceUses(SDValue(N, 0), SuperReg);
1731 ReplaceUses(SDValue(N, i),
1735 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1736 CurDAG->RemoveDeadNode(N);
1743 AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1752 N, OldBase, NewBase, NewOffset);
1826 void AArch64DAGToDAGISel::SelectPExtPair(SDNode *N, unsigned Opc) {
1828 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(N->getOperand(2)))
1832 SDLoc DL(N);
1833 EVT VT = N->getValueType(0);
1834 SDValue Ops[] = {N->getOperand(1), N->getOperand(2)};
1839 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
1842 CurDAG->RemoveDeadNode(N);
1845 void AArch64DAGToDAGISel::SelectWhilePair(SDNode *N, unsigned Opc) {
1846 SDLoc DL(N);
1847 EVT VT = N->getValueType(0);
1849 SDValue Ops[] = {N->getOperand(1), N->getOperand(2)};
1855 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
1858 CurDAG->RemoveDeadNode(N);
1861 void AArch64DAGToDAGISel::SelectCVTIntrinsic(SDNode *N, unsigned NumVecs,
1863 EVT VT = N->getValueType(0);
1864 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1866 SDLoc DL(N);
1870 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1873 CurDAG->RemoveDeadNode(N);
1876 void AArch64DAGToDAGISel::SelectDestructiveMultiIntrinsic(SDNode *N,
1883 SDLoc DL(N);
1884 EVT VT = N->getValueType(0);
1888 SmallVector<SDValue, 4> Regs(N->op_begin() + StartIdx,
1889 N->op_begin() + StartIdx + NumVecs);
1899 Zm = N->getOperand(NumVecs + FirstVecIdx);
1904 N->getOperand(1), Zdn, Zm);
1909 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1912 CurDAG->RemoveDeadNode(N);
1915 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
1919 SDLoc DL(N);
1920 EVT VT = N->getValueType(0);
1921 SDValue Chain = N->getOperand(0);
1927 N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
1930 SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
1939 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1944 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1945 CurDAG->RemoveDeadNode(N);
1948 void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
1954 SDLoc DL(N);
1955 EVT VT = N->getValueType(0);
1956 SDValue Chain = N->getOperand(0);
1958 SDValue PNg = N->getOperand(2);
1959 SDValue Base = N->getOperand(3);
1963 findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, Base, Offset, Scale);
1974 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1979 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1980 CurDAG->RemoveDeadNode(N);
1983 void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
1985 if (N->getValueType(0) != MVT::nxv4f32)
1987 SelectUnaryMultiIntrinsic(N, NumVecs, true, Opcode);
2018 void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
2020 SDLoc DL(N);
2021 EVT VT = N->getValueType(0);
2023 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2025 SDValue Zn = N->getOperand(1 + NumVecs);
2026 SDValue Zm = N->getOperand(2 + NumVecs);
2033 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
2036 CurDAG->RemoveDeadNode(N);
2067 void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
2071 TileNum = N->getConstantOperandVal(2);
2078 SliceBase = N->getOperand(2);
2080 SliceBase = N->getOperand(3);
2085 SDLoc DL(N);
2087 SDValue Ops[] = {SubReg, Base, Offset, /*Chain*/ N->getOperand(0)};
2090 EVT VT = N->getValueType(0);
2092 ReplaceUses(SDValue(N, I),
2097 ReplaceUses(SDValue(N, ChainIdx), SDValue(Mov, 1));
2098 CurDAG->RemoveDeadNode(N);
2101 void AArch64DAGToDAGISel::SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
2107 SDValue SliceBase = N->getOperand(2);
2109 SliceBase = N->getOperand(3);
2117 SDLoc DL(N);
2120 Ops.push_back(N->getOperand(2));
2123 Ops.push_back(N->getOperand(0)); //Chain
2126 EVT VT = N->getValueType(0);
2128 ReplaceUses(SDValue(N, I),
2134 ReplaceUses(SDValue(N, ChainIdx), SDValue(Mov, 1));
2135 CurDAG->RemoveDeadNode(N);
2138 void AArch64DAGToDAGISel::SelectUnaryMultiIntrinsic(SDNode *N,
2142 SDLoc DL(N);
2143 EVT VT = N->getValueType(0);
2144 unsigned NumInVecs = N->getNumOperands() - 1;
2150 SmallVector<SDValue, 4> Regs(N->op_begin() + 1,
2151 N->op_begin() + 1 + NumInVecs);
2156 Ops.push_back(N->getOperand(1 + I));
2163 ReplaceUses(SDValue(N, I), CurDAG->getTargetExtractSubreg(
2165 CurDAG->RemoveDeadNode(N);
2168 void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
2170 SDLoc dl(N);
2171 EVT VT = N->getOperand(2)->getValueType(0);
2175 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2178 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
2179 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
2182 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2185 ReplaceNode(N, St);
2188 void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
2191 SDLoc dl(N);
2194 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2201 N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
2204 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
2207 N->getOperand(0)}; // chain
2208 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
2210 ReplaceNode(N, St);
2213 bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
2215 SDLoc dl(N);
2220 if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
2230 void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
2232 SDLoc dl(N);
2233 EVT VT = N->getOperand(2)->getValueType(0);
2239 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2243 N->getOperand(NumVecs + 1), // base register
2244 N->getOperand(NumVecs + 2), // Incremental
2245 N->getOperand(0)}; // Chain
2248 ReplaceNode(N, St);
2286 void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
2288 SDLoc dl(N);
2289 EVT VT = N->getValueType(0);
2293 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2303 unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2);
2306 N->getOperand(NumVecs + 3), N->getOperand(0)};
2317 ReplaceUses(SDValue(N, i), NV);
2320 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
2321 CurDAG->RemoveDeadNode(N);
2324 void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
2326 SDLoc dl(N);
2327 EVT VT = N->getValueType(0);
2331 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2342 unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1);
2347 N->getOperand(NumVecs + 2), // Base register
2348 N->getOperand(NumVecs + 3), // Incremental
2349 N->getOperand(0)};
2353 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
2358 ReplaceUses(SDValue(N, 0),
2369 ReplaceUses(SDValue(N, i), NV);
2374 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
2375 CurDAG->RemoveDeadNode(N);
2378 void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
2380 SDLoc dl(N);
2381 EVT VT = N->getOperand(2)->getValueType(0);
2385 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
2393 unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2);
2396 N->getOperand(NumVecs + 3), N->getOperand(0)};
2400 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2403 ReplaceNode(N, St);
2406 void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
2408 SDLoc dl(N);
2409 EVT VT = N->getOperand(2)->getValueType(0);
2413 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
2424 unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1);
2427 N->getOperand(NumVecs + 2), // Base Register
2428 N->getOperand(NumVecs + 3), // Incremental
2429 N->getOperand(0)};
2433 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2436 ReplaceNode(N, St);
2439 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
2444 assert(N->getOpcode() == ISD::AND &&
2445 "N must be a AND operation to call this function");
2447 EVT VT = N->getValueType(0);
2464 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
2467 const SDNode *Op0 = N->getOperand(0).getNode();
2504 Opd0 = N->getOperand(0);
2512 (dbgs() << N
2533 static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
2536 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
2538 EVT VT = N->getValueType(0);
2543 SDValue Op = N->getOperand(0);
2555 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2566 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
2583 if (N->getOpcode() != ISD::SRL)
2587 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
2590 Opd0 = N->getOperand(0).getOperand(0);
2593 if (!isIntImmediate(N->getOperand(1), SrlImm))
2600 Opc = N->getValueType(0) == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2606 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
2609 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
2610 "N must be a SHR/SRA operation to call this function");
2612 EVT VT = N->getValueType(0);
2621 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
2627 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
2628 Opd0 = N->getOperand(0).getOperand(0);
2629 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
2630 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
2635 Opd0 = N->getOperand(0).getOperand(0);
2643 Opd0 = N->getOperand(0);
2651 (dbgs() << N
2657 if (!isIntImmediate(N->getOperand(1), SrlImm))
2667 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2669 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2673 bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2674 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2676 EVT VT = N->getValueType(0);
2677 EVT NarrowVT = N->getOperand(0)->getValueType(0);
2682 SDValue Op = N->getOperand(0);
2686 SDLoc dl(N);
2693 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
2697 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2701 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
2704 switch (N->getOpcode()) {
2706 if (!N->isMachineOpcode())
2710 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
2714 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2717 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2720 unsigned NOpc = N->getMachineOpcode();
2729 Opd0 = N->getOperand(0);
2730 Immr = N->getConstantOperandVal(1);
2731 Imms = N->getConstantOperandVal(2);
2738 bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2741 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2744 EVT VT = N->getValueType(0);
2745 SDLoc dl(N);
2756 ReplaceNode(N, Inner.getNode());
2762 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3032 // For bit-field-positioning pattern "(and (shl VAL, N), ShiftedMask)".
3039 // For bit-field-positioning pattern "(shl VAL, N)".
3047 /// essentially "(and (shl VAL, N), Mask)" or (shl VAL, N).
3108 // For pattern "and(shl(val, N), shifted-mask)", 'ShlOp0' is set to 'val'.
3113 // For pattern "and(any_extend(shl(val, N)), shifted-mask)"
3115 // ShlVal == shl(val, N), which is a left shift on a smaller type.
3143 // - For "(and (any_extend(shl val, N)), shifted-mask)", the `and` Op
3166 // For node (shl (and val, mask), N), returns true if the node is equivalent to
3171 // Caller should have verified that N is a left shift with constant shift
3241 static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
3242 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
3244 EVT VT = N->getValueType(0);
3251 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
3261 SDValue And = N->getOperand(0);
3315 SDLoc DL(N);
3325 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3404 static bool tryOrrWithShift(SDNode *N, SDValue OrOpd0, SDValue OrOpd1,
3407 EVT VT = N->getValueType(0);
3408 assert(N->getOpcode() == ISD::OR && "Expect N to be an OR node");
3409 assert(((N->getOperand(0) == OrOpd0 && N->getOperand(1) == OrOpd1) ||
3410 (N->getOperand(1) == OrOpd0 && N->getOperand(0) == OrOpd1)) &&
3413 "Expect result type to be i32 or i64 since N is combinable to BFM");
3414 SDLoc DL(N);
3439 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3455 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3464 // If N is selected to be BFI, we know that
3468 // Instead of selecting N to BFI, fold OrOpd0 as a left shift directly.
3474 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3486 // If N is selected to be BFXIL, we know that
3490 // Instead of selecting N to BFXIL, fold OrOpd0 as a right shift directly.
3496 CurDAG->SelectNodeTo(N, OrrOpc, VT, Ops);
3504 static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
3506 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
3508 EVT VT = N->getValueType(0);
3543 SDValue OrOpd0Val = N->getOperand(I % 2);
3545 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
3608 if (tryOrrWithShift(N, OrOpd0Val, OrOpd1Val, Src, Dst, CurDAG,
3613 SDLoc DL(N);
3617 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3625 SDValue And0 = N->getOperand(0);
3626 SDValue And1 = N->getOperand(1);
3648 SDLoc DL(N);
3671 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3678 bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
3679 if (N->getOpcode() != ISD::OR)
3683 getUsefulBits(SDValue(N, 0), NUsefulBits);
3687 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
3691 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
3694 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
3700 bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
3701 if (N->getOpcode() != ISD::AND)
3704 EVT VT = N->getValueType(0);
3710 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
3719 SDLoc DL(N);
3723 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3729 bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3730 EVT VT = N->getValueType(0);
3733 switch (N->getOpcode()) {
3761 SDValue ShiftAmt = N->getOperand(1);
3762 SDLoc DL(N);
3776 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3782 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3802 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
3848 SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
3849 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
3853 static bool checkCVTFixedPointOperandWithFBits(SelectionDAG *CurDAG, SDValue N,
3858 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
3860 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
3890 // N.b. isPowerOf2 also checks for > 0.
3899 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
3903 bool AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
3905 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
3909 bool AArch64DAGToDAGISel::SelectCVTFixedPosRecipOperand(SDValue N,
3912 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
3952 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
3953 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
3955 SDLoc DL(N);
3957 bool ReadIs128Bit = N->getOpcode() == AArch64ISD::MRRS;
3983 SDValue InChain = N->getOperand(0);
3986 CurDAG->SelectNodeTo(N, Opcode64Bit, MVT::i64, MVT::Other /* Chain */,
4002 ReplaceUses(SDValue(N, 0), Lo);
4003 ReplaceUses(SDValue(N, 1), Hi);
4004 ReplaceUses(SDValue(N, 2), OutChain);
4013 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
4014 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
4016 SDLoc DL(N);
4018 bool WriteIs128Bit = N->getOpcode() == AArch64ISD::MSRR;
4028 assert(isa<ConstantSDNode>(N->getOperand(2)) &&
4031 uint64_t Immed = N->getConstantOperandVal(2);
4033 N, State, MVT::Other, CurDAG->getTargetConstant(Reg, DL, MVT::i32),
4034 CurDAG->getTargetConstant(Immed, DL, MVT::i16), N->getOperand(0));
4066 SDValue InChain = N->getOperand(0);
4068 CurDAG->SelectNodeTo(N, AArch64::MSR, MVT::Other,
4070 N->getOperand(2), InChain);
4078 N->getOperand(2),
4080 N->getOperand(3),
4083 CurDAG->SelectNodeTo(N, AArch64::MSRR, MVT::Other,
4092 bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
4094 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
4111 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
4112 N->getOperand(0)};
4114 Opcode, SDLoc(N),
4117 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
4120 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
4121 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
4122 CurDAG->RemoveDeadNode(N);
4127 bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
4129 if (!isa<ConstantSDNode>(N))
4132 SDLoc DL(N);
4133 uint64_t Val = cast<ConstantSDNode>(N)
4167 bool AArch64DAGToDAGISel::SelectSVEAddSubSSatImm(SDValue N, MVT VT,
4170 if (!isa<ConstantSDNode>(N))
4173 SDLoc DL(N);
4174 int64_t Val = cast<ConstantSDNode>(N)
4217 bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
4219 if (!isa<ConstantSDNode>(N))
4222 SDLoc DL(N);
4223 int64_t Val = cast<ConstantSDNode>(N)
4257 bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
4258 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4260 SDLoc DL(N);
4269 bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
4270 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4290 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
4297 bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
4299 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
4301 SDLoc DL(N);
4343 bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
4346 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
4360 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
4367 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
4371 if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
4375 SDValue IRG_SP = N->getOperand(2);
4382 SDLoc DL(N);
4383 int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
4386 int TagOffset = N->getConstantOperandVal(3);
4390 {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
4392 ReplaceNode(N, Out);
4396 void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
4397 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
4399 if (trySelectStackSlotTagP(N))
4405 SDLoc DL(N);
4406 int TagOffset = N->getConstantOperandVal(3);
4408 {N->getOperand(1), N->getOperand(2)});
4410 {SDValue(N1, 0), N->getOperand(2)});
4415 ReplaceNode(N, N3);
4418 bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {
4419 assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!");
4422 if (N->getConstantOperandVal(2) != 0)
4424 if (!N->getOperand(0).isUndef())
4428 EVT VT = N->getValueType(0);
4429 EVT InVT = N->getOperand(1).getValueType();
4442 SDLoc DL(N);
4444 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT,
4445 N->getOperand(1), RC));
4449 bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {
4450 assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!");
4453 if (N->getConstantOperandVal(1) != 0)
4457 EVT VT = N->getValueType(0);
4458 EVT InVT = N->getOperand(0).getValueType();
4471 SDLoc DL(N);
4473 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT,
4474 N->getOperand(0), RC));
4478 bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
4479 assert(N->getOpcode() == ISD::OR && "Expected OR instruction");
4481 SDValue N0 = N->getOperand(0);
4482 SDValue N1 = N->getOperand(1);
4483 EVT VT = N->getValueType(0);
4521 SDLoc DL(N);
4529 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
4561 CurDAG->SelectNodeTo(N, AArch64::XAR, N0.getValueType(), Ops);
7217 /// where Root is the memory access using N for its address.
7219 bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
7226 if (N.getOpcode() == ISD::FrameIndex) {
7227 int FI = cast<FrameIndexSDNode>(N)->getIndex();
7232 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
7242 if (N.getOpcode() != ISD::ADD)
7245 SDValue VScale = N.getOperand(1);
7260 Base = N.getOperand(0);
7269 OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
7275 bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
7278 if (N.getOpcode() != ISD::ADD)
7282 const SDValue LHS = N.getOperand(0);
7283 const SDValue RHS = N.getOperand(1);
7302 SDLoc DL(N);
7326 bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
7330 return TLI->isAllActivePredicate(*CurDAG, N);
7333 bool AArch64DAGToDAGISel::SelectAnyPredicate(SDValue N) {
7334 EVT VT = N.getValueType();
7338 bool AArch64DAGToDAGISel::SelectSMETileSlice(SDValue N, unsigned MaxSize,
7342 if (N.getOpcode() == ISD::ADD)
7343 if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
7346 Base = N.getOperand(0);
7347 Offset = CurDAG->getTargetConstant(ImmOff / Scale, SDLoc(N), MVT::i64);
7353 Base = N;
7354 Offset = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
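Taken together, most of the SDValue-based matches above share one ComplexPattern-style shape: take the value N being matched plus reference output operands, bail out early when the node does not fit, and otherwise fill the outputs with target constants or registers and return true. A minimal sketch of that shape, assuming a hypothetical selector name and placeholder bounds (it is not the body of any selector listed above):

    // Hypothetical immediate selector illustrating the recurring shape:
    // reject non-constants, range-check the value, then materialize it as a
    // target constant operand.
    bool AArch64DAGToDAGISel::SelectExampleImm(SDValue N, SDValue &Imm) {
      auto *C = dyn_cast<ConstantSDNode>(N);
      if (!C)
        return false;

      int64_t Val = C->getSExtValue();
      // Placeholder range; each real selector above applies its own encoding
      // constraints (scaling, sign, shift amount, etc.).
      if (Val < -32 || Val > 31)
        return false;

      Imm = CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
      return true;
    }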