xref: /netbsd-src/external/apache2/llvm/dist/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the RISCV target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVISelDAGToDAG.h"
14 #include "MCTargetDesc/RISCVMCTargetDesc.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCVISelLowering.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/IR/IntrinsicsRISCV.h"
19 #include "llvm/Support/Alignment.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/KnownBits.h"
22 #include "llvm/Support/MathExtras.h"
23 #include "llvm/Support/raw_ostream.h"
24 
25 using namespace llvm;
26 
27 #define DEBUG_TYPE "riscv-isel"
28 
29 namespace llvm {
30 namespace RISCV {
31 #define GET_RISCVVSSEGTable_IMPL
32 #define GET_RISCVVLSEGTable_IMPL
33 #define GET_RISCVVLXSEGTable_IMPL
34 #define GET_RISCVVSXSEGTable_IMPL
35 #define GET_RISCVVLETable_IMPL
36 #define GET_RISCVVSETable_IMPL
37 #define GET_RISCVVLXTable_IMPL
38 #define GET_RISCVVSXTable_IMPL
39 #include "RISCVGenSearchableTables.inc"
40 } // namespace RISCV
41 } // namespace llvm
42 
43 void RISCVDAGToDAGISel::PostprocessISelDAG() {
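  // doPeepholeLoadStoreADDI folds an ADDI feeding a load/store address into
  // the load/store's immediate offset where possible (editor's summary; the
  // helper is defined later in this file).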
44   doPeepholeLoadStoreADDI();
45 }
46 
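// Illustrative example (editor's note, not part of the upstream source): on
// RV64, materializing 0x12345678 typically becomes
//   lui   reg, 0x12345        (reg = 0x12345000)
//   addiw reg, reg, 0x678     (reg = 0x12345678)
// Each instruction after the first chains on the previous result rather than
// X0; the exact sequence is chosen by RISCVMatInt::generateInstSeq.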
47 static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
48                          MVT XLenVT) {
49   RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64);
50 
51   SDNode *Result = nullptr;
52   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
53   for (RISCVMatInt::Inst &Inst : Seq) {
54     SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
55     if (Inst.Opc == RISCV::LUI)
56       Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
57     else
58       Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
59 
60     // Only the first instruction has X0 as its source.
61     SrcReg = SDValue(Result, 0);
62   }
63 
64   return Result;
65 }
66 
67 static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
68                                unsigned RegClassID, unsigned SubReg0) {
69   assert(Regs.size() >= 2 && Regs.size() <= 8);
70 
71   SDLoc DL(Regs[0]);
72   SmallVector<SDValue, 8> Ops;
73 
74   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
75 
76   for (unsigned I = 0; I < Regs.size(); ++I) {
77     Ops.push_back(Regs[I]);
78     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
79   }
80   SDNode *N =
81       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
82   return SDValue(N, 0);
83 }
84 
85 static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
86                              unsigned NF) {
87   static const unsigned RegClassIDs[] = {
88       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
89       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
90       RISCV::VRN8M1RegClassID};
91 
92   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
93 }
94 
95 static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
96                              unsigned NF) {
97   static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
98                                          RISCV::VRN3M2RegClassID,
99                                          RISCV::VRN4M2RegClassID};
100 
101   return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
102 }
103 
104 static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
105                              unsigned NF) {
106   return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
107                          RISCV::sub_vrm4_0);
108 }
109 
110 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
111                            unsigned NF, RISCVII::VLMUL LMUL) {
112   switch (LMUL) {
113   default:
114     llvm_unreachable("Invalid LMUL.");
115   case RISCVII::VLMUL::LMUL_F8:
116   case RISCVII::VLMUL::LMUL_F4:
117   case RISCVII::VLMUL::LMUL_F2:
118   case RISCVII::VLMUL::LMUL_1:
119     return createM1Tuple(CurDAG, Regs, NF);
120   case RISCVII::VLMUL::LMUL_2:
121     return createM2Tuple(CurDAG, Regs, NF);
122   case RISCVII::VLMUL::LMUL_4:
123     return createM4Tuple(CurDAG, Regs, NF);
124   }
125 }
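// Illustrative sketch (editor's note): for NF = 2 at LMUL = 1, createTuple
// produces roughly
//   REG_SEQUENCE VRN2M1RegClassID, v0, sub_vrm1_0, v1, sub_vrm1_1
// i.e. the register-class id followed by alternating (register,
// subregister-index) pairs with consecutive subregister indices.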
126 
127 void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
128     SDNode *Node, unsigned SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked,
129     bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands, MVT *IndexVT) {
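  // Operand order built below (editor's summary): base pointer,
  // [stride or index], [mask register V0], VL, log2(SEW), chain, [glue].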
130   SDValue Chain = Node->getOperand(0);
131   SDValue Glue;
132 
133   SDValue Base;
134   SelectBaseAddr(Node->getOperand(CurOp++), Base);
135   Operands.push_back(Base); // Base pointer.
136 
137   if (IsStridedOrIndexed) {
138     Operands.push_back(Node->getOperand(CurOp++)); // Index.
139     if (IndexVT)
140       *IndexVT = Operands.back()->getSimpleValueType(0);
141   }
142 
143   if (IsMasked) {
144     // Mask needs to be copied to V0.
145     SDValue Mask = Node->getOperand(CurOp++);
146     Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
147     Glue = Chain.getValue(1);
148     Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
149   }
150   SDValue VL;
151   selectVLOp(Node->getOperand(CurOp++), VL);
152   Operands.push_back(VL);
153 
154   MVT XLenVT = Subtarget->getXLenVT();
155   SDValue SEWOp = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
156   Operands.push_back(SEWOp);
157 
158   Operands.push_back(Chain); // Chain.
159   if (Glue)
160     Operands.push_back(Glue);
161 }
162 
163 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
164                                     bool IsStrided) {
165   SDLoc DL(Node);
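  // The node produces NF vector results plus a chain, hence the -1 below
  // (editor's note).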
166   unsigned NF = Node->getNumValues() - 1;
167   MVT VT = Node->getSimpleValueType(0);
168   unsigned ScalarSize = VT.getScalarSizeInBits();
169   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
170 
171   unsigned CurOp = 2;
172   SmallVector<SDValue, 8> Operands;
173   if (IsMasked) {
174     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
175                                  Node->op_begin() + CurOp + NF);
176     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
177     Operands.push_back(MaskedOff);
178     CurOp += NF;
179   }
180 
181   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
182                              Operands);
183 
184   const RISCV::VLSEGPseudo *P =
185       RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
186                             static_cast<unsigned>(LMUL));
187   MachineSDNode *Load =
188       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
189 
190   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
191     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
192 
193   SDValue SuperReg = SDValue(Load, 0);
194   for (unsigned I = 0; I < NF; ++I) {
195     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
196     ReplaceUses(SDValue(Node, I),
197                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
198   }
199 
200   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
201   CurDAG->RemoveDeadNode(Node);
202 }
203 
204 void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
205   SDLoc DL(Node);
206   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
207   MVT VT = Node->getSimpleValueType(0);
208   MVT XLenVT = Subtarget->getXLenVT();
209   unsigned ScalarSize = VT.getScalarSizeInBits();
210   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
211 
212   unsigned CurOp = 2;
213   SmallVector<SDValue, 7> Operands;
214   if (IsMasked) {
215     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
216                                  Node->op_begin() + CurOp + NF);
217     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
218     Operands.push_back(MaskedOff);
219     CurOp += NF;
220   }
221 
222   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
223                              /*IsStridedOrIndexed*/ false, Operands);
224 
225   const RISCV::VLSEGPseudo *P =
226       RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
227                             ScalarSize, static_cast<unsigned>(LMUL));
228   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
229                                                MVT::Other, MVT::Glue, Operands);
230   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
231                                           /*Glue*/ SDValue(Load, 2));
232 
233   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
234     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
235 
236   SDValue SuperReg = SDValue(Load, 0);
237   for (unsigned I = 0; I < NF; ++I) {
238     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
239     ReplaceUses(SDValue(Node, I),
240                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
241   }
242 
243   ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
244   ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
245   CurDAG->RemoveDeadNode(Node);
246 }
247 
248 void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
249                                      bool IsOrdered) {
250   SDLoc DL(Node);
251   unsigned NF = Node->getNumValues() - 1;
252   MVT VT = Node->getSimpleValueType(0);
253   unsigned ScalarSize = VT.getScalarSizeInBits();
254   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
255 
256   unsigned CurOp = 2;
257   SmallVector<SDValue, 8> Operands;
258   if (IsMasked) {
259     SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
260                                  Node->op_begin() + CurOp + NF);
261     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
262     Operands.push_back(MaskedOff);
263     CurOp += NF;
264   }
265 
266   MVT IndexVT;
267   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
268                              /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
269 
270   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
271          "Element count mismatch");
272 
273   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
274   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
275   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
276       NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
277       static_cast<unsigned>(IndexLMUL));
278   MachineSDNode *Load =
279       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
280 
281   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
282     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
283 
284   SDValue SuperReg = SDValue(Load, 0);
285   for (unsigned I = 0; I < NF; ++I) {
286     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
287     ReplaceUses(SDValue(Node, I),
288                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
289   }
290 
291   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
292   CurDAG->RemoveDeadNode(Node);
293 }
294 
295 void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
296                                     bool IsStrided) {
297   SDLoc DL(Node);
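  // Operands are: chain, intrinsic id, NF vectors to store, base pointer,
  // optional stride, optional mask, and VL; strip the non-vector operands
  // to recover NF (editor's note).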
298   unsigned NF = Node->getNumOperands() - 4;
299   if (IsStrided)
300     NF--;
301   if (IsMasked)
302     NF--;
303   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
304   unsigned ScalarSize = VT.getScalarSizeInBits();
305   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
306   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
307   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
308 
309   SmallVector<SDValue, 8> Operands;
310   Operands.push_back(StoreVal);
311   unsigned CurOp = 2 + NF;
312 
313   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
314                              Operands);
315 
316   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
317       NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
318   MachineSDNode *Store =
319       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
320 
321   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
322     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
323 
324   ReplaceNode(Node, Store);
325 }
326 
327 void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
328                                      bool IsOrdered) {
329   SDLoc DL(Node);
330   unsigned NF = Node->getNumOperands() - 5;
331   if (IsMasked)
332     --NF;
333   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
334   unsigned ScalarSize = VT.getScalarSizeInBits();
335   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
336   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
337   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
338 
339   SmallVector<SDValue, 8> Operands;
340   Operands.push_back(StoreVal);
341   unsigned CurOp = 2 + NF;
342 
343   MVT IndexVT;
344   addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
345                              /*IsStridedOrIndexed*/ true, Operands, &IndexVT);
346 
347   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
348          "Element count mismatch");
349 
350   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
351   unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
352   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
353       NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
354       static_cast<unsigned>(IndexLMUL));
355   MachineSDNode *Store =
356       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
357 
358   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
359     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
360 
361   ReplaceNode(Node, Store);
362 }
363 
364 
365 void RISCVDAGToDAGISel::Select(SDNode *Node) {
366   // If we have a custom node, we have already selected.
367   if (Node->isMachineOpcode()) {
368     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
369     Node->setNodeId(-1);
370     return;
371   }
372 
373   // Instruction selection not handled by the auto-generated tablegen
374   // patterns should be handled here.
375   unsigned Opcode = Node->getOpcode();
376   MVT XLenVT = Subtarget->getXLenVT();
377   SDLoc DL(Node);
378   MVT VT = Node->getSimpleValueType(0);
379 
380   switch (Opcode) {
381   case ISD::Constant: {
382     auto *ConstNode = cast<ConstantSDNode>(Node);
383     if (VT == XLenVT && ConstNode->isNullValue()) {
384       SDValue New =
385           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
386       ReplaceNode(Node, New.getNode());
387       return;
388     }
389     ReplaceNode(Node, selectImm(CurDAG, DL, ConstNode->getSExtValue(), XLenVT));
390     return;
391   }
392   case ISD::FrameIndex: {
393     SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
394     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
395     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
396     ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
397     return;
398   }
399   case ISD::SRL: {
400     // We don't need this transform if zext.h is supported.
401     if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
402       break;
403     // Optimize (srl (and X, 0xffff), C) ->
404     //          (srli (slli X, (XLen-16)), (XLen-16) + C)
405     // Taking into account that the 0xffff may have had lower bits unset by
406     // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
407     // This pattern occurs when type legalizing i16 right shifts.
408     // FIXME: This could be extended to other AND masks.
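    // Illustrative example (editor's note): on RV64,
    //   (srl (and X, 0xffff), 4)
    // becomes SLLI X by 48 followed by SRLI by 52, leaving X[15:4] in the
    // low bits without materializing the 0xffff mask.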
409     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
410     if (N1C) {
411       uint64_t ShAmt = N1C->getZExtValue();
412       SDValue N0 = Node->getOperand(0);
413       if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
414           isa<ConstantSDNode>(N0.getOperand(1))) {
415         uint64_t Mask = N0.getConstantOperandVal(1);
416         Mask |= maskTrailingOnes<uint64_t>(ShAmt);
417         if (Mask == 0xffff) {
418           unsigned LShAmt = Subtarget->getXLen() - 16;
419           SDNode *SLLI =
420               CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
421                                      CurDAG->getTargetConstant(LShAmt, DL, VT));
422           SDNode *SRLI = CurDAG->getMachineNode(
423               RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
424               CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
425           ReplaceNode(Node, SRLI);
426           return;
427         }
428       }
429     }
430 
431     break;
432   }
433   case ISD::INTRINSIC_WO_CHAIN: {
434     unsigned IntNo = Node->getConstantOperandVal(0);
435     switch (IntNo) {
436       // By default we do not custom select any intrinsic.
437     default:
438       break;
439     case Intrinsic::riscv_vmsgeu:
440     case Intrinsic::riscv_vmsge: {
441       SDValue Src1 = Node->getOperand(1);
442       SDValue Src2 = Node->getOperand(2);
443       // Only custom select scalar second operand.
444       if (Src2.getValueType() != XLenVT)
445         break;
446       // Small constants are handled with patterns.
447       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
448         int64_t CVal = C->getSExtValue();
449         if (CVal >= -15 && CVal <= 16)
450           break;
451       }
452       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
453       MVT Src1VT = Src1.getSimpleValueType();
454       unsigned VMSLTOpcode, VMNANDOpcode;
455       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
456       default:
457         llvm_unreachable("Unexpected LMUL!");
458       case RISCVII::VLMUL::LMUL_F8:
459         VMSLTOpcode =
460             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
461         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
462         break;
463       case RISCVII::VLMUL::LMUL_F4:
464         VMSLTOpcode =
465             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
466         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
467         break;
468       case RISCVII::VLMUL::LMUL_F2:
469         VMSLTOpcode =
470             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
471         VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
472         break;
473       case RISCVII::VLMUL::LMUL_1:
474         VMSLTOpcode =
475             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
476         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
477         break;
478       case RISCVII::VLMUL::LMUL_2:
479         VMSLTOpcode =
480             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
481         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
482         break;
483       case RISCVII::VLMUL::LMUL_4:
484         VMSLTOpcode =
485             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
486         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
487         break;
488       case RISCVII::VLMUL::LMUL_8:
489         VMSLTOpcode =
490             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
491         VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
492         break;
493       }
494       SDValue SEW = CurDAG->getTargetConstant(
495           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
496       SDValue VL;
497       selectVLOp(Node->getOperand(3), VL);
498 
499       // Expand to
500       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
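      // (vmnand with both sources equal to the comparison result is a vector
      // NOT, so vd ends up holding va >= x; editor's note.)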
501       SDValue Cmp = SDValue(
502           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
503           0);
504       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
505                                                {Cmp, Cmp, VL, SEW}));
506       return;
507     }
508     case Intrinsic::riscv_vmsgeu_mask:
509     case Intrinsic::riscv_vmsge_mask: {
510       SDValue Src1 = Node->getOperand(2);
511       SDValue Src2 = Node->getOperand(3);
512       // Only custom select scalar second operand.
513       if (Src2.getValueType() != XLenVT)
514         break;
515       // Small constants are handled with patterns.
516       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
517         int64_t CVal = C->getSExtValue();
518         if (CVal >= -15 && CVal <= 16)
519           break;
520       }
521       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
522       MVT Src1VT = Src1.getSimpleValueType();
523       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
524       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
525       default:
526         llvm_unreachable("Unexpected LMUL!");
527       case RISCVII::VLMUL::LMUL_F8:
528         VMSLTOpcode =
529             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
530         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
531                                      : RISCV::PseudoVMSLT_VX_MF8_MASK;
532         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
533         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
534         break;
535       case RISCVII::VLMUL::LMUL_F4:
536         VMSLTOpcode =
537             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
538         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
539                                      : RISCV::PseudoVMSLT_VX_MF4_MASK;
540         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
541         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
542         break;
543       case RISCVII::VLMUL::LMUL_F2:
544         VMSLTOpcode =
545             IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
546         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
547                                      : RISCV::PseudoVMSLT_VX_MF2_MASK;
548         VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
549         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
550         break;
551       case RISCVII::VLMUL::LMUL_1:
552         VMSLTOpcode =
553             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
554         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
555                                      : RISCV::PseudoVMSLT_VX_M1_MASK;
556         VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
557         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
558         break;
559       case RISCVII::VLMUL::LMUL_2:
560         VMSLTOpcode =
561             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
562         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
563                                      : RISCV::PseudoVMSLT_VX_M2_MASK;
564         VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
565         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
566         break;
567       case RISCVII::VLMUL::LMUL_4:
568         VMSLTOpcode =
569             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
570         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
571                                      : RISCV::PseudoVMSLT_VX_M4_MASK;
572         VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
573         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
574         break;
575       case RISCVII::VLMUL::LMUL_8:
576         VMSLTOpcode =
577             IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
578         VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
579                                      : RISCV::PseudoVMSLT_VX_M8_MASK;
580         VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
581         VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
582         break;
583       }
584       SDValue SEW = CurDAG->getTargetConstant(
585           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
586       SDValue VL;
587       selectVLOp(Node->getOperand(5), VL);
588       SDValue MaskedOff = Node->getOperand(1);
589       SDValue Mask = Node->getOperand(4);
590       // If the MaskedOff value and the Mask are the same value use
591       // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
592       // This avoids needing to copy v0 to vd before starting the next sequence.
593       if (Mask == MaskedOff) {
594         SDValue Cmp = SDValue(
595             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
596             0);
597         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
598                                                  {Mask, Cmp, VL, SEW}));
599         return;
600       }
601 
602       // Otherwise use
603       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
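      // (The XOR with v0 flips only the active lanes from "<" to ">=";
      // inactive lanes keep the MaskedOff value since their mask bit is
      // zero; editor's note.)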
604       SDValue Cmp = SDValue(
605           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
606                                  {MaskedOff, Src1, Src2, Mask, VL, SEW}),
607           0);
608       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
609                                                {Cmp, Mask, VL, SEW}));
610       return;
611     }
612     }
613     break;
614   }
615   case ISD::INTRINSIC_W_CHAIN: {
616     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
617     switch (IntNo) {
618       // By default we do not custom select any intrinsic.
619     default:
620       break;
621 
622     case Intrinsic::riscv_vsetvli:
623     case Intrinsic::riscv_vsetvlimax: {
624       if (!Subtarget->hasStdExtV())
625         break;
626 
627       bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
628       unsigned Offset = VLMax ? 2 : 3;
629 
630       assert(Node->getNumOperands() == Offset + 2 &&
631              "Unexpected number of operands");
632 
633       unsigned SEW =
634           RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
635       RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
636           Node->getConstantOperandVal(Offset + 1) & 0x7);
637 
638       unsigned VTypeI = RISCVVType::encodeVTYPE(
639           VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
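      // Illustrative encoding (editor's note, assuming the standard vtype
      // layout of vlmul in bits 2:0, vsew in bits 5:3, vta in bit 6 and vma
      // in bit 7): SEW=32 with LMUL=2, tail-agnostic, mask-undisturbed
      // yields VTypeI = 0x51.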
640       SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
641 
642       SDValue VLOperand;
643       if (VLMax) {
644         VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
645       } else {
646         VLOperand = Node->getOperand(2);
647 
648         if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
649           uint64_t AVL = C->getZExtValue();
650           if (isUInt<5>(AVL)) {
651             SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
652             ReplaceNode(
653                 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
654                                              MVT::Other, VLImm, VTypeIOp,
655                                              /* Chain */ Node->getOperand(0)));
656             return;
657           }
658         }
659       }
660 
661       ReplaceNode(Node,
662                   CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
663                                          MVT::Other, VLOperand, VTypeIOp,
664                                          /* Chain */ Node->getOperand(0)));
665       return;
666     }
667     case Intrinsic::riscv_vlseg2:
668     case Intrinsic::riscv_vlseg3:
669     case Intrinsic::riscv_vlseg4:
670     case Intrinsic::riscv_vlseg5:
671     case Intrinsic::riscv_vlseg6:
672     case Intrinsic::riscv_vlseg7:
673     case Intrinsic::riscv_vlseg8: {
674       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
675       return;
676     }
677     case Intrinsic::riscv_vlseg2_mask:
678     case Intrinsic::riscv_vlseg3_mask:
679     case Intrinsic::riscv_vlseg4_mask:
680     case Intrinsic::riscv_vlseg5_mask:
681     case Intrinsic::riscv_vlseg6_mask:
682     case Intrinsic::riscv_vlseg7_mask:
683     case Intrinsic::riscv_vlseg8_mask: {
684       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
685       return;
686     }
687     case Intrinsic::riscv_vlsseg2:
688     case Intrinsic::riscv_vlsseg3:
689     case Intrinsic::riscv_vlsseg4:
690     case Intrinsic::riscv_vlsseg5:
691     case Intrinsic::riscv_vlsseg6:
692     case Intrinsic::riscv_vlsseg7:
693     case Intrinsic::riscv_vlsseg8: {
694       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
695       return;
696     }
697     case Intrinsic::riscv_vlsseg2_mask:
698     case Intrinsic::riscv_vlsseg3_mask:
699     case Intrinsic::riscv_vlsseg4_mask:
700     case Intrinsic::riscv_vlsseg5_mask:
701     case Intrinsic::riscv_vlsseg6_mask:
702     case Intrinsic::riscv_vlsseg7_mask:
703     case Intrinsic::riscv_vlsseg8_mask: {
704       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
705       return;
706     }
707     case Intrinsic::riscv_vloxseg2:
708     case Intrinsic::riscv_vloxseg3:
709     case Intrinsic::riscv_vloxseg4:
710     case Intrinsic::riscv_vloxseg5:
711     case Intrinsic::riscv_vloxseg6:
712     case Intrinsic::riscv_vloxseg7:
713     case Intrinsic::riscv_vloxseg8:
714       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
715       return;
716     case Intrinsic::riscv_vluxseg2:
717     case Intrinsic::riscv_vluxseg3:
718     case Intrinsic::riscv_vluxseg4:
719     case Intrinsic::riscv_vluxseg5:
720     case Intrinsic::riscv_vluxseg6:
721     case Intrinsic::riscv_vluxseg7:
722     case Intrinsic::riscv_vluxseg8:
723       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
724       return;
725     case Intrinsic::riscv_vloxseg2_mask:
726     case Intrinsic::riscv_vloxseg3_mask:
727     case Intrinsic::riscv_vloxseg4_mask:
728     case Intrinsic::riscv_vloxseg5_mask:
729     case Intrinsic::riscv_vloxseg6_mask:
730     case Intrinsic::riscv_vloxseg7_mask:
731     case Intrinsic::riscv_vloxseg8_mask:
732       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
733       return;
734     case Intrinsic::riscv_vluxseg2_mask:
735     case Intrinsic::riscv_vluxseg3_mask:
736     case Intrinsic::riscv_vluxseg4_mask:
737     case Intrinsic::riscv_vluxseg5_mask:
738     case Intrinsic::riscv_vluxseg6_mask:
739     case Intrinsic::riscv_vluxseg7_mask:
740     case Intrinsic::riscv_vluxseg8_mask:
741       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
742       return;
743     case Intrinsic::riscv_vlseg8ff:
744     case Intrinsic::riscv_vlseg7ff:
745     case Intrinsic::riscv_vlseg6ff:
746     case Intrinsic::riscv_vlseg5ff:
747     case Intrinsic::riscv_vlseg4ff:
748     case Intrinsic::riscv_vlseg3ff:
749     case Intrinsic::riscv_vlseg2ff: {
750       selectVLSEGFF(Node, /*IsMasked*/ false);
751       return;
752     }
753     case Intrinsic::riscv_vlseg8ff_mask:
754     case Intrinsic::riscv_vlseg7ff_mask:
755     case Intrinsic::riscv_vlseg6ff_mask:
756     case Intrinsic::riscv_vlseg5ff_mask:
757     case Intrinsic::riscv_vlseg4ff_mask:
758     case Intrinsic::riscv_vlseg3ff_mask:
759     case Intrinsic::riscv_vlseg2ff_mask: {
760       selectVLSEGFF(Node, /*IsMasked*/ true);
761       return;
762     }
763     case Intrinsic::riscv_vloxei:
764     case Intrinsic::riscv_vloxei_mask:
765     case Intrinsic::riscv_vluxei:
766     case Intrinsic::riscv_vluxei_mask: {
767       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
768                       IntNo == Intrinsic::riscv_vluxei_mask;
769       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
770                        IntNo == Intrinsic::riscv_vloxei_mask;
771 
772       MVT VT = Node->getSimpleValueType(0);
773       unsigned ScalarSize = VT.getScalarSizeInBits();
774 
775       unsigned CurOp = 2;
776       SmallVector<SDValue, 8> Operands;
777       if (IsMasked)
778         Operands.push_back(Node->getOperand(CurOp++));
779 
780       MVT IndexVT;
781       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
782                                  /*IsStridedOrIndexed*/ true, Operands,
783                                  &IndexVT);
784 
785       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
786              "Element count mismatch");
787 
788       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
789       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
790       unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
791       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
792           IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
793           static_cast<unsigned>(IndexLMUL));
794       MachineSDNode *Load =
795           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
796 
797       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
798         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
799 
800       ReplaceNode(Node, Load);
801       return;
802     }
803     case Intrinsic::riscv_vle1:
804     case Intrinsic::riscv_vle:
805     case Intrinsic::riscv_vle_mask:
806     case Intrinsic::riscv_vlse:
807     case Intrinsic::riscv_vlse_mask: {
808       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
809                       IntNo == Intrinsic::riscv_vlse_mask;
810       bool IsStrided =
811           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
812 
813       MVT VT = Node->getSimpleValueType(0);
814       unsigned ScalarSize = VT.getScalarSizeInBits();
815       // VLE1 uses an SEW of 8.
816       unsigned SEW = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
817 
818       unsigned CurOp = 2;
819       SmallVector<SDValue, 8> Operands;
820       if (IsMasked)
821         Operands.push_back(Node->getOperand(CurOp++));
822 
823       addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
824                                  Operands);
825 
826       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
827       const RISCV::VLEPseudo *P =
828           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
829                               static_cast<unsigned>(LMUL));
830       MachineSDNode *Load =
831           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
832 
833       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
834         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
835 
836       ReplaceNode(Node, Load);
837       return;
838     }
839     case Intrinsic::riscv_vleff:
840     case Intrinsic::riscv_vleff_mask: {
841       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
842 
843       MVT VT = Node->getSimpleValueType(0);
844       unsigned ScalarSize = VT.getScalarSizeInBits();
845 
846       unsigned CurOp = 2;
847       SmallVector<SDValue, 7> Operands;
848       if (IsMasked)
849         Operands.push_back(Node->getOperand(CurOp++));
850 
851       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
852                                  /*IsStridedOrIndexed*/ false, Operands);
853 
854       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
855       const RISCV::VLEPseudo *P =
856           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
857                               ScalarSize, static_cast<unsigned>(LMUL));
858       MachineSDNode *Load =
859           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
860                                  MVT::Other, MVT::Glue, Operands);
861       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
862                                               /*Glue*/ SDValue(Load, 2));
863 
864       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
865         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
866 
867       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
868       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
869       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
870       CurDAG->RemoveDeadNode(Node);
871       return;
872     }
873     }
874     break;
875   }
876   case ISD::INTRINSIC_VOID: {
877     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
878     switch (IntNo) {
879     case Intrinsic::riscv_vsseg2:
880     case Intrinsic::riscv_vsseg3:
881     case Intrinsic::riscv_vsseg4:
882     case Intrinsic::riscv_vsseg5:
883     case Intrinsic::riscv_vsseg6:
884     case Intrinsic::riscv_vsseg7:
885     case Intrinsic::riscv_vsseg8: {
886       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
887       return;
888     }
889     case Intrinsic::riscv_vsseg2_mask:
890     case Intrinsic::riscv_vsseg3_mask:
891     case Intrinsic::riscv_vsseg4_mask:
892     case Intrinsic::riscv_vsseg5_mask:
893     case Intrinsic::riscv_vsseg6_mask:
894     case Intrinsic::riscv_vsseg7_mask:
895     case Intrinsic::riscv_vsseg8_mask: {
896       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
897       return;
898     }
899     case Intrinsic::riscv_vssseg2:
900     case Intrinsic::riscv_vssseg3:
901     case Intrinsic::riscv_vssseg4:
902     case Intrinsic::riscv_vssseg5:
903     case Intrinsic::riscv_vssseg6:
904     case Intrinsic::riscv_vssseg7:
905     case Intrinsic::riscv_vssseg8: {
906       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
907       return;
908     }
909     case Intrinsic::riscv_vssseg2_mask:
910     case Intrinsic::riscv_vssseg3_mask:
911     case Intrinsic::riscv_vssseg4_mask:
912     case Intrinsic::riscv_vssseg5_mask:
913     case Intrinsic::riscv_vssseg6_mask:
914     case Intrinsic::riscv_vssseg7_mask:
915     case Intrinsic::riscv_vssseg8_mask: {
916       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
917       return;
918     }
919     case Intrinsic::riscv_vsoxseg2:
920     case Intrinsic::riscv_vsoxseg3:
921     case Intrinsic::riscv_vsoxseg4:
922     case Intrinsic::riscv_vsoxseg5:
923     case Intrinsic::riscv_vsoxseg6:
924     case Intrinsic::riscv_vsoxseg7:
925     case Intrinsic::riscv_vsoxseg8:
926       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
927       return;
928     case Intrinsic::riscv_vsuxseg2:
929     case Intrinsic::riscv_vsuxseg3:
930     case Intrinsic::riscv_vsuxseg4:
931     case Intrinsic::riscv_vsuxseg5:
932     case Intrinsic::riscv_vsuxseg6:
933     case Intrinsic::riscv_vsuxseg7:
934     case Intrinsic::riscv_vsuxseg8:
935       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
936       return;
937     case Intrinsic::riscv_vsoxseg2_mask:
938     case Intrinsic::riscv_vsoxseg3_mask:
939     case Intrinsic::riscv_vsoxseg4_mask:
940     case Intrinsic::riscv_vsoxseg5_mask:
941     case Intrinsic::riscv_vsoxseg6_mask:
942     case Intrinsic::riscv_vsoxseg7_mask:
943     case Intrinsic::riscv_vsoxseg8_mask:
944       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
945       return;
946     case Intrinsic::riscv_vsuxseg2_mask:
947     case Intrinsic::riscv_vsuxseg3_mask:
948     case Intrinsic::riscv_vsuxseg4_mask:
949     case Intrinsic::riscv_vsuxseg5_mask:
950     case Intrinsic::riscv_vsuxseg6_mask:
951     case Intrinsic::riscv_vsuxseg7_mask:
952     case Intrinsic::riscv_vsuxseg8_mask:
953       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
954       return;
955     case Intrinsic::riscv_vsoxei:
956     case Intrinsic::riscv_vsoxei_mask:
957     case Intrinsic::riscv_vsuxei:
958     case Intrinsic::riscv_vsuxei_mask: {
959       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
960                       IntNo == Intrinsic::riscv_vsuxei_mask;
961       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
962                        IntNo == Intrinsic::riscv_vsoxei_mask;
963 
964       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
965       unsigned ScalarSize = VT.getScalarSizeInBits();
966 
967       unsigned CurOp = 2;
968       SmallVector<SDValue, 8> Operands;
969       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
970 
971       MVT IndexVT;
972       addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
973                                  /*IsStridedOrIndexed*/ true, Operands,
974                                  &IndexVT);
975 
976       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
977              "Element count mismatch");
978 
979       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
980       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
981       unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
982       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
983           IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
984           static_cast<unsigned>(IndexLMUL));
985       MachineSDNode *Store =
986           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
987 
988       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
989         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
990 
991       ReplaceNode(Node, Store);
992       return;
993     }
994     case Intrinsic::riscv_vse1:
995     case Intrinsic::riscv_vse:
996     case Intrinsic::riscv_vse_mask:
997     case Intrinsic::riscv_vsse:
998     case Intrinsic::riscv_vsse_mask: {
999       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1000                       IntNo == Intrinsic::riscv_vsse_mask;
1001       bool IsStrided =
1002           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1003 
1004       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1005       unsigned ScalarSize = VT.getScalarSizeInBits();
1006       // VSE1 uses an SEW of 8.
1007       unsigned SEW = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
1008 
1009       unsigned CurOp = 2;
1010       SmallVector<SDValue, 8> Operands;
1011       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1012 
1013       addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
1014                                  Operands);
1015 
1016       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1017       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1018           IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
1019       MachineSDNode *Store =
1020           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1021       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1022         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1023 
1024       ReplaceNode(Node, Store);
1025       return;
1026     }
1027     }
1028     break;
1029   }
1030   case ISD::BITCAST: {
1031     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1032     // Just drop bitcasts between vectors if both are fixed or both are
1033     // scalable.
1034     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1035         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1036       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1037       CurDAG->RemoveDeadNode(Node);
1038       return;
1039     }
1040     break;
1041   }
1042   case ISD::INSERT_SUBVECTOR: {
1043     SDValue V = Node->getOperand(0);
1044     SDValue SubV = Node->getOperand(1);
1045     SDLoc DL(SubV);
1046     auto Idx = Node->getConstantOperandVal(2);
1047     MVT SubVecVT = SubV.getSimpleValueType();
1048 
1049     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1050     MVT SubVecContainerVT = SubVecVT;
1051     // Establish the correct scalable-vector types for any fixed-length type.
1052     if (SubVecVT.isFixedLengthVector())
1053       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1054     if (VT.isFixedLengthVector())
1055       VT = TLI.getContainerForFixedLengthVector(VT);
1056 
1057     const auto *TRI = Subtarget->getRegisterInfo();
1058     unsigned SubRegIdx;
1059     std::tie(SubRegIdx, Idx) =
1060         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1061             VT, SubVecContainerVT, Idx, TRI);
1062 
1063     // If the Idx hasn't been completely eliminated then this is a subvector
1064     // insert which doesn't naturally align to a vector register. These must
1065     // be handled using instructions to manipulate the vector registers.
1066     if (Idx != 0)
1067       break;
1068 
1069     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1070     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1071                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1072                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1073     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1074     assert((!IsSubVecPartReg || V.isUndef()) &&
1075            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1076            "the subvector is smaller than a full-sized register");
1077 
1078     // If we haven't set a SubRegIdx, then we must be going between
1079     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1080     if (SubRegIdx == RISCV::NoSubRegister) {
1081       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1082       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1083                  InRegClassID &&
1084              "Unexpected subvector extraction");
1085       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1086       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1087                                                DL, VT, SubV, RC);
1088       ReplaceNode(Node, NewNode);
1089       return;
1090     }
1091 
1092     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1093     ReplaceNode(Node, Insert.getNode());
1094     return;
1095   }
1096   case ISD::EXTRACT_SUBVECTOR: {
1097     SDValue V = Node->getOperand(0);
1098     auto Idx = Node->getConstantOperandVal(1);
1099     MVT InVT = V.getSimpleValueType();
1100     SDLoc DL(V);
1101 
1102     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1103     MVT SubVecContainerVT = VT;
1104     // Establish the correct scalable-vector types for any fixed-length type.
1105     if (VT.isFixedLengthVector())
1106       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1107     if (InVT.isFixedLengthVector())
1108       InVT = TLI.getContainerForFixedLengthVector(InVT);
1109 
1110     const auto *TRI = Subtarget->getRegisterInfo();
1111     unsigned SubRegIdx;
1112     std::tie(SubRegIdx, Idx) =
1113         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1114             InVT, SubVecContainerVT, Idx, TRI);
1115 
1116     // If the Idx hasn't been completely eliminated then this is a subvector
1117     // extract which doesn't naturally align to a vector register. These must
1118     // be handled using instructions to manipulate the vector registers.
1119     if (Idx != 0)
1120       break;
1121 
1122     // If we haven't set a SubRegIdx, then we must be going between
1123     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1124     if (SubRegIdx == RISCV::NoSubRegister) {
1125       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1126       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1127                  InRegClassID &&
1128              "Unexpected subvector extraction");
1129       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1130       SDNode *NewNode =
1131           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1132       ReplaceNode(Node, NewNode);
1133       return;
1134     }
1135 
1136     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1137     ReplaceNode(Node, Extract.getNode());
1138     return;
1139   }
1140   case RISCVISD::VMV_V_X_VL:
1141   case RISCVISD::VFMV_V_F_VL: {
1142     // Try to match splat of a scalar load to a strided load with stride of x0.
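    // For example (editor's note), a splat of a loaded i32 element can
    // become a vlse32 with rs2 = x0: the zero stride reads the same address
    // for every element, broadcasting the scalar without a separate scalar
    // load.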
1143     SDValue Src = Node->getOperand(0);
1144     auto *Ld = dyn_cast<LoadSDNode>(Src);
1145     if (!Ld)
1146       break;
1147     EVT MemVT = Ld->getMemoryVT();
1148     // The memory VT should be the same size as the element type.
1149     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1150       break;
1151     if (!IsProfitableToFold(Src, Node, Node) ||
1152         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1153       break;
1154 
1155     SDValue VL;
1156     selectVLOp(Node->getOperand(1), VL);
1157 
1158     unsigned ScalarSize = VT.getScalarSizeInBits();
1159     SDValue SEW = CurDAG->getTargetConstant(Log2_32(ScalarSize), DL, XLenVT);
1160 
1161     SDValue Operands[] = {Ld->getBasePtr(),
1162                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1163                           Ld->getChain()};
1164 
1165     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1166     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1167         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, ScalarSize,
1168         static_cast<unsigned>(LMUL));
1169     MachineSDNode *Load =
1170         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1171 
1172     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1173       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1174 
1175     ReplaceNode(Node, Load);
1176     return;
1177   }
1178   }
1179 
1180   // Select the default instruction.
1181   SelectCode(Node);
1182 }
1183 
1184 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1185     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1186   switch (ConstraintID) {
1187   case InlineAsm::Constraint_m:
1188     // We only support simple memory operands that have a single address
1189     // operand and need no special handling.
1190     OutOps.push_back(Op);
1191     return false;
1192   case InlineAsm::Constraint_A:
1193     OutOps.push_back(Op);
1194     return false;
1195   default:
1196     break;
1197   }
1198 
1199   return true;
1200 }
1201 
1202 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1203   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1204     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1205     return true;
1206   }
1207   return false;
1208 }
1209 
1210 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1211   // If this is FrameIndex, select it directly. Otherwise just let it get
1212   // selected to a register independently.
1213   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1214     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1215   else
1216     Base = Addr;
1217   return true;
1218 }
1219 
1220 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1221                                         SDValue &ShAmt) {
1222   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1223   // amount. If there is an AND on the shift amount, we can bypass it if it
1224   // doesn't affect any of those bits.
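  // For example (editor's note), on RV64 (ShiftWidth == 64) a shift amount
  // of (and Y, 63) can be replaced by Y directly, since only bits [5:0] are
  // consumed.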
1225   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1226     const APInt &AndMask = N->getConstantOperandAPInt(1);
1227 
1228     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1229     // mask that covers the bits needed to represent all shift amounts.
1230     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1231     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1232 
1233     if (ShMask.isSubsetOf(AndMask)) {
1234       ShAmt = N.getOperand(0);
1235       return true;
1236     }
1237 
1238     // SimplifyDemandedBits may have optimized the mask so try restoring any
1239     // bits that are known zero.
1240     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1241     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1242       ShAmt = N.getOperand(0);
1243       return true;
1244     }
1245   }
1246 
1247   ShAmt = N;
1248   return true;
1249 }
1250 
1251 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1252   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1253       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1254     Val = N.getOperand(0);
1255     return true;
1256   }
1257   // FIXME: Should we just call computeNumSignBits here?
1258   if (N.getOpcode() == ISD::AssertSext &&
1259       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1260     Val = N;
1261     return true;
1262   }
1263   if (N.getOpcode() == ISD::AssertZext &&
1264       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) {
1265     Val = N;
1266     return true;
1267   }
1268 
1269   return false;
1270 }
1271 
1272 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1273   if (N.getOpcode() == ISD::AND) {
1274     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1275     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1276       Val = N.getOperand(0);
1277       return true;
1278     }
1279   }
1280   // FIXME: Should we just call computeKnownBits here?
1281   if (N.getOpcode() == ISD::AssertZext &&
1282       cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
1283     Val = N;
1284     return true;
1285   }
1286 
1287   return false;
1288 }
1289 
1290 // Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
1291 // on RV64).
1292 // SLLIUW is the same as SLLI except for the fact that it clears the bits
1293 // XLEN-1:32 of the input RS1 before shifting.
1294 // A PatFrag has already checked that it has the right structure:
1295 //
1296 //  (AND (SHL RS1, VC2), VC1)
1297 //
1298 // We check that VC2, the shamt, is less than 32; otherwise the pattern is
1299 // exactly the same as SLLI and we give priority to that.
1300 // Finally, we check that VC1, the mask used to clear the upper 32 bits
1301 // of RS1, is correct:
1302 //
1303 //  VC1 == (0xFFFFFFFF << VC2)
1304 //
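// For example (editor's note), with VC2 == 4 the pattern only matches when
// VC1 == 0xFFFFFFFF0, i.e. (and (shl RS1, 4), 0xFFFFFFFF0), which is exactly
// what SLLIUW computes.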
1305 bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
1306   assert(N->getOpcode() == ISD::AND);
1307   assert(N->getOperand(0).getOpcode() == ISD::SHL);
1308   assert(isa<ConstantSDNode>(N->getOperand(1)));
1309   assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
1310 
1311   // The IsRV64 predicate is checked after PatFrag predicates so we can get
1312   // here even on RV32.
1313   if (!Subtarget->is64Bit())
1314     return false;
1315 
1316   SDValue Shl = N->getOperand(0);
1317   uint64_t VC1 = N->getConstantOperandVal(1);
1318   uint64_t VC2 = Shl.getConstantOperandVal(1);
1319 
1320   // Immediate range should be enforced by uimm5 predicate.
1321   assert(VC2 < 32 && "Unexpected immediate");
1322   return (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
1323 }
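
// Worked example of the VC1/VC2 relationship checked in MatchSLLIUW above.
// This is an illustrative sketch only; the helper name is invented and unused.
//   VC2 = 4, VC1 = 0xFFFFFFFF0 -> VC1 >> VC2 == 0xFFFFFFFF, matches SLLIUW;
//   VC2 = 4, VC1 = 0x0FFFFFFF0 -> the mask clears more than bits XLEN-1:32,
//                                 so the pattern is left to SLLI + AND.
LLVM_ATTRIBUTE_UNUSED static bool isSLLIUWMaskPair(uint64_t VC1,
                                                   uint64_t VC2) {
  return VC2 < 32 && (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
}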
1324 
1325 // Select VL as a 5-bit immediate or a value that will become a register. This
1326 // allows us to choose between VSETIVLI and VSETVLI later.
1327 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1328   auto *C = dyn_cast<ConstantSDNode>(N);
1329   if (C && isUInt<5>(C->getZExtValue()))
1330     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1331                                    N->getValueType(0));
1332   else
1333     VL = N;
1334 
1335   return true;
1336 }
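
// Boundary cases for the VL split above (illustrative only; the helper name
// is invented and unused):
//   VL == 31 fits a uimm5 and can later be emitted as VSETIVLI;
//   VL == 32 does not, so the value stays in a register for VSETVLI.
LLVM_ATTRIBUTE_UNUSED static bool vlFitsImmediateForm(uint64_t VL) {
  return isUInt<5>(VL);
}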
1337 
1338 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1339   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1340       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1341       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1342     return false;
1343   SplatVal = N.getOperand(0);
1344   return true;
1345 }
1346 
1347 using ValidateFn = bool (*)(int64_t);
1348 
1349 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1350                                    SelectionDAG &DAG,
1351                                    const RISCVSubtarget &Subtarget,
1352                                    ValidateFn ValidateImm) {
1353   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1354        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1355        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1356       !isa<ConstantSDNode>(N.getOperand(0)))
1357     return false;
1358 
1359   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1360 
1361   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1362   // share semantics when the operand type is wider than the resulting vector
1363   // element type: an implicit truncation first takes place. Therefore, perform
1364   // a manual truncation/sign-extension in order to ignore any truncated bits
1365   // and catch any zero-extended immediate.
1366   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1367   // sign-extending to (XLenVT -1).
1368   MVT XLenVT = Subtarget.getXLenVT();
1369   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1370          "Unexpected splat operand type");
1371   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1372   if (EltVT.bitsLT(XLenVT))
1373     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1374 
1375   if (!ValidateImm(SplatImm))
1376     return false;
1377 
1378   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1379   return true;
1380 }
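
// Worked example of the truncation/sign-extension performed above. This is an
// illustrative sketch; the helper name is invented and nothing calls it.
//   A splat of (i8 -1) reaches the selector as the XLenVT constant 255;
//   SignExtend64(255, /*B=*/8) == -1 and isInt<5>(-1) holds, so the splat is
//   accepted as the simm5 value -1 rather than rejected as 255.
LLVM_ATTRIBUTE_UNUSED static bool
isSimm5SplatAfterTruncation(int64_t SplatImm, unsigned EltBits) {
  return isInt<5>(SignExtend64(SplatImm, EltBits));
}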
1381 
1382 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1383   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1384                                 [](int64_t Imm) { return isInt<5>(Imm); });
1385 }
1386 
1387 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1388   return selectVSplatSimmHelper(
1389       N, SplatVal, *CurDAG, *Subtarget,
1390       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1391 }
1392 
1393 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1394                                                       SDValue &SplatVal) {
1395   return selectVSplatSimmHelper(
1396       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1397         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1398       });
1399 }
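
// The Simm5Plus1 validators above accept exactly the range [-15, 16]: the
// immediates Imm for which Imm - 1 still fits a simm5. Illustrative sketch;
// the helper name is invented and unused.
//   Imm - 1 in [-16, 15]  <=>  (isInt<5>(Imm) && Imm != -16) || Imm == 16.
LLVM_ATTRIBUTE_UNUSED static bool isSimm5Plus1(int64_t Imm) {
  return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
}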
1400 
1401 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1402   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1403        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1404        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1405       !isa<ConstantSDNode>(N.getOperand(0)))
1406     return false;
1407 
1408   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1409 
1410   if (!isUInt<5>(SplatImm))
1411     return false;
1412 
1413   SplatVal =
1414       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1415 
1416   return true;
1417 }
1418 
1419 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1420                                        SDValue &Imm) {
1421   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1422     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1423 
1424     if (!isInt<5>(ImmVal))
1425       return false;
1426 
1427     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1428     return true;
1429   }
1430 
1431   return false;
1432 }
1433 
1434 // Merge an ADDI into the offset of a load/store instruction where possible.
1435 // (load (addi base, off1), off2) -> (load base, off1+off2)
1436 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1437 // This is possible when off1+off2 fits a 12-bit immediate.
1438 void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
1439   SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
1440   ++Position;
1441 
1442   while (Position != CurDAG->allnodes_begin()) {
1443     SDNode *N = &*--Position;
1444     // Skip dead nodes and any non-machine opcodes.
1445     if (N->use_empty() || !N->isMachineOpcode())
1446       continue;
1447 
1448     int OffsetOpIdx;
1449     int BaseOpIdx;
1450 
1451     // Only attempt this optimisation for I-type loads and S-type stores.
1452     switch (N->getMachineOpcode()) {
1453     default:
1454       continue;
1455     case RISCV::LB:
1456     case RISCV::LH:
1457     case RISCV::LW:
1458     case RISCV::LBU:
1459     case RISCV::LHU:
1460     case RISCV::LWU:
1461     case RISCV::LD:
1462     case RISCV::FLH:
1463     case RISCV::FLW:
1464     case RISCV::FLD:
1465       BaseOpIdx = 0;
1466       OffsetOpIdx = 1;
1467       break;
1468     case RISCV::SB:
1469     case RISCV::SH:
1470     case RISCV::SW:
1471     case RISCV::SD:
1472     case RISCV::FSH:
1473     case RISCV::FSW:
1474     case RISCV::FSD:
1475       BaseOpIdx = 1;
1476       OffsetOpIdx = 2;
1477       break;
1478     }
1479 
1480     if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1481       continue;
1482 
1483     SDValue Base = N->getOperand(BaseOpIdx);
1484 
1485     // If the base is an ADDI, we can merge it in to the load/store.
1486     if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1487       continue;
1488 
1489     SDValue ImmOperand = Base.getOperand(1);
1490     uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1491 
1492     if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1493       int64_t Offset1 = Const->getSExtValue();
1494       int64_t CombinedOffset = Offset1 + Offset2;
1495       if (!isInt<12>(CombinedOffset))
1496         continue;
1497       ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1498                                              ImmOperand.getValueType());
1499     } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1500       // If the off1 in (addi base, off1) is a global variable's address (its
1501       // low part, really), then we can rely on the alignment of that variable
1502       // to provide a margin of safety before off1 can overflow the 12 bits.
1503       // Check if off2 falls within that margin; if so, off1+off2 can't overflow.
1504       const DataLayout &DL = CurDAG->getDataLayout();
1505       Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1506       if (Offset2 != 0 && Alignment <= Offset2)
1507         continue;
1508       int64_t Offset1 = GA->getOffset();
1509       int64_t CombinedOffset = Offset1 + Offset2;
1510       ImmOperand = CurDAG->getTargetGlobalAddress(
1511           GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1512           CombinedOffset, GA->getTargetFlags());
1513     } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1514       // Ditto.
1515       Align Alignment = CP->getAlign();
1516       if (Offset2 != 0 && Alignment <= Offset2)
1517         continue;
1518       int64_t Offset1 = CP->getOffset();
1519       int64_t CombinedOffset = Offset1 + Offset2;
1520       ImmOperand = CurDAG->getTargetConstantPool(
1521           CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1522           CombinedOffset, CP->getTargetFlags());
1523     } else {
1524       continue;
1525     }
1526 
1527     LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
1528     LLVM_DEBUG(Base->dump(CurDAG));
1529     LLVM_DEBUG(dbgs() << "\nN: ");
1530     LLVM_DEBUG(N->dump(CurDAG));
1531     LLVM_DEBUG(dbgs() << "\n");
1532 
1533     // Modify the offset operand of the load/store.
1534     if (BaseOpIdx == 0) // Load
1535       CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1536                                  N->getOperand(2));
1537     else // Store
1538       CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1539                                  ImmOperand, N->getOperand(3));
1540 
1541     // The add-immediate may now be dead, in which case remove it.
1542     if (Base.getNode()->use_empty())
1543       CurDAG->RemoveDeadNode(Base.getNode());
1544   }
1545 }
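
// Worked examples for the constant-offset case of the peephole above. This is
// an illustrative sketch; the helper name is invented and nothing calls it.
//   lw a0, 4(addi a1, 2040) -> lw a0, 2044(a1), since 2044 fits in a simm12;
//   lw a0, 8(addi a1, 2044) -> left alone: 2052 does not fit a simm12.
// The global-address and constant-pool cases reach the same bound indirectly:
// off2 must stay below the symbol's alignment, so adding it cannot push the
// folded offset out of the 12-bit range the low-part relocation relies on.
LLVM_ATTRIBUTE_UNUSED static bool canFoldConstantOffsets(int64_t Off1,
                                                         int64_t Off2) {
  return isInt<12>(Off1 + Off2);
}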
1546 
1547 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1548 // for instruction scheduling.
1549 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
1550   return new RISCVDAGToDAGISel(TM);
1551 }
1552