//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
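    // The stride-0 VLSE built below rereads the same 8-byte stack slot for
    // every element, broadcasting the reassembled i64 to the whole vector.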
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    assert(N->getNumOperands() == 3 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);
    SDValue VL = N->getOperand(2);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain, IntID, StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64), VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

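    // doPeepholeSExtW folds a sext.w (ADDIW rd, rs, 0) into its producer by
    // converting the producer to its W form where possible;
    // doPeepholeLoadStoreADDI folds an ADDI's immediate into the offset field
    // of an adjacent load/store.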
    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeLoadStoreADDI(N);
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
                         const RISCVSubtarget &Subtarget) {
  MVT XLenVT = Subtarget.getXLenVT();
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
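  // For instance, Imm = 0x12345678 comes back as LUI 0x12345 followed by an
  // ADDI(W) of 0x678; each step below chains the previous result as SrcReg.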

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    if (Inst.Opc == RISCV::LUI)
      Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
    else if (Inst.Opc == RISCV::ADDUW)
      Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, XLenVT));
    else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD ||
             Inst.Opc == RISCV::SH3ADD)
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SrcReg);
    else
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                               unsigned RegClassID, unsigned SubReg0) {
  assert(Regs.size() >= 2 && Regs.size() <= 8);

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
}

static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                         RISCV::VRN3M2RegClassID,
                                         RISCV::VRN4M2RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
}

static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
                         RISCV::sub_vrm4_0);
}

static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
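  // A fractional-LMUL operand still occupies one full vector register, so the
  // NF x M1 tuple register classes cover LMUL_F8 through LMUL_1.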
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return createM1Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_2:
    return createM2Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_4:
    return createM4Tuple(CurDAG, Regs, NF);
  }
}

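// Appends the operands shared by all RVV load/store pseudos: base pointer,
// optional stride or index, optional mask (routed through V0), VL, SEW, the
// tail policy for masked loads, and finally the chain and any glue.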
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}

void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
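  // A fault-only-first segment load updates the vl CSR with the number of
  // elements actually loaded; PseudoReadVL reads it back, glued to the load
  // so nothing can change vl in between.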
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isZero()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    // If the upper XLen-16 bits are not used, try to convert this to a simm12
    // by sign extending bit 15.
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
        hasAllHUsers(Node))
      Imm = SignExtend64(Imm, 16);
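    // For example, 0xFFF8 with only 16-bit users becomes -8, which selects a
    // single ADDI instead of an LUI+ADDI pair.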
    // If the upper 32-bits are not used try to convert this into a simm32 by
    // sign extending bit 31.
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64(Imm, 32);

    ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
    return;
  }
  case ISD::FrameIndex: {
    SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
    return;
  }
  case ISD::SRL: {
    // We don't need this transform if zext.h is supported.
    if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
      break;
    // Optimize (srl (and X, 0xffff), C) ->
    //          (srli (slli X, (XLen-16)), (XLen-16) + C)
    // Taking into account that the 0xffff may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
    // This pattern occurs when type legalizing i16 right shifts.
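    // For example, on RV64 (srl (and X, 0xffff), 4) becomes
    // (srli (slli X, 48), 52).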
    // FIXME: This could be extended to other AND masks.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (N1C) {
      uint64_t ShAmt = N1C->getZExtValue();
      SDValue N0 = Node->getOperand(0);
      if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
          isa<ConstantSDNode>(N0.getOperand(1))) {
        uint64_t Mask = N0.getConstantOperandVal(1);
        Mask |= maskTrailingOnes<uint64_t>(ShAmt);
        if (Mask == 0xffff) {
          unsigned LShAmt = Subtarget->getXLen() - 16;
          SDNode *SLLI =
              CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                                     CurDAG->getTargetConstant(LShAmt, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    break;
  }
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is an ANDI, zext.h, or zext.w.
    bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
    if (C1 == UINT64_C(0xFFFF) &&
        (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
      ZExtOrANDI = true;
    if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
      ZExtOrANDI = true;

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
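    // For example, (and (srl x, 4), 0x00FFFFFFFFFFFFFF) has c3 = 8 and
    // becomes (srli (slli x, 4), 8) on RV64.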
    if (!LeftShift && isMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        SDValue Y;
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            selectSExti32(X, Y)) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
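    // For example, with Zba on RV64, (and (shl x, 2), 0x3FFFFFFFC) selects
    // (slli.uw x, 2); without Zba it becomes (srli (slli x, 32), 30).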
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLIUW =
              CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLIUW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
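    // For example, (and (srl x, 8), 0x00FFFFFFFFFFFFF0) has 8 leading and 4
    // trailing zeros and becomes (slli (srli x, 12), 4) on RV64.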
    if (!LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !ZExtOrANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
          OneUseOrZExtW && !ZExtOrANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
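    // For example, (and (shl x, 4), 0xFFFFFFFFFFFFFF00) has no leading zeros
    // and 8 trailing zeros and becomes (slli (srli x, 4), 8) on RV64.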
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
      uint64_t C3 = countTrailingZeros(C1);
      if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !ZExtOrANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !ZExtOrANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, XLenVT, X,
            CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, XLenVT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(C3, DL, XLenVT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    break;
  }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.
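    // For example, on RV64 without Zbb, (mul (and X, 0xFFFF), 100) becomes
    // (mulhu (slli X, 48), 100 << 16): the 128-bit product is
    // (X & 0xFFFF) * 100 shifted left by 64, so MULHU returns it directly.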

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // This should be the only use of the AND unless we will use
    // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
    // constants.
    if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
    // optimization.
    if (isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant.
    if (XLen == 32)
      ShiftedC1 = SignExtend64(ShiftedC1, 32);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

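      // There is no vmsge{u}.vx instruction in the V extension, so compute
      // va >= x as the complement of va < x.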
      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
                                     : RISCV::PseudoVMSLT_VX_MF8_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
                                     : RISCV::PseudoVMSLT_VX_MF4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
                                     : RISCV::PseudoVMSLT_VX_MF2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
                                     : RISCV::PseudoVMSLT_VX_M1_MASK;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
                                     : RISCV::PseudoVMSLT_VX_M2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
                                     : RISCV::PseudoVMSLT_VX_M4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
                                     : RISCV::PseudoVMSLT_VX_M8_MASK;
        break;
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
        VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);
      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;

    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax: {
      if (!Subtarget->hasVInstructions())
        break;

      bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
      unsigned Offset = VLMax ? 2 : 3;
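      // riscv_vsetvli carries (chain, id, avl, sew, vlmul), so SEW is at
      // operand 3; riscv_vsetvlimax omits the AVL, putting SEW at operand 2.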
1008 
1009       assert(Node->getNumOperands() == Offset + 2 &&
1010              "Unexpected number of operands");
1011 
1012       unsigned SEW =
1013           RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
1014       RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
1015           Node->getConstantOperandVal(Offset + 1) & 0x7);
1016 
1017       unsigned VTypeI = RISCVVType::encodeVTYPE(
1018           VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
1019       SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
1020 
1021       SDValue VLOperand;
1022       unsigned Opcode = RISCV::PseudoVSETVLI;
1023       if (VLMax) {
1024         VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
1025         Opcode = RISCV::PseudoVSETVLIX0;
1026       } else {
1027         VLOperand = Node->getOperand(2);
1028 
1029         if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
1030           uint64_t AVL = C->getZExtValue();
1031           if (isUInt<5>(AVL)) {
1032             SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
1033             ReplaceNode(
1034                 Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
1035                                              MVT::Other, VLImm, VTypeIOp,
1036                                              /* Chain */ Node->getOperand(0)));
1037             return;
1038           }
1039         }
1040       }
1041 
1042       ReplaceNode(Node,
1043                   CurDAG->getMachineNode(Opcode, DL, XLenVT,
1044                                          MVT::Other, VLOperand, VTypeIOp,
1045                                          /* Chain */ Node->getOperand(0)));
1046       return;
1047     }
1048     case Intrinsic::riscv_vlseg2:
1049     case Intrinsic::riscv_vlseg3:
1050     case Intrinsic::riscv_vlseg4:
1051     case Intrinsic::riscv_vlseg5:
1052     case Intrinsic::riscv_vlseg6:
1053     case Intrinsic::riscv_vlseg7:
1054     case Intrinsic::riscv_vlseg8: {
1055       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1056       return;
1057     }
1058     case Intrinsic::riscv_vlseg2_mask:
1059     case Intrinsic::riscv_vlseg3_mask:
1060     case Intrinsic::riscv_vlseg4_mask:
1061     case Intrinsic::riscv_vlseg5_mask:
1062     case Intrinsic::riscv_vlseg6_mask:
1063     case Intrinsic::riscv_vlseg7_mask:
1064     case Intrinsic::riscv_vlseg8_mask: {
1065       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1066       return;
1067     }
1068     case Intrinsic::riscv_vlsseg2:
1069     case Intrinsic::riscv_vlsseg3:
1070     case Intrinsic::riscv_vlsseg4:
1071     case Intrinsic::riscv_vlsseg5:
1072     case Intrinsic::riscv_vlsseg6:
1073     case Intrinsic::riscv_vlsseg7:
1074     case Intrinsic::riscv_vlsseg8: {
1075       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1076       return;
1077     }
1078     case Intrinsic::riscv_vlsseg2_mask:
1079     case Intrinsic::riscv_vlsseg3_mask:
1080     case Intrinsic::riscv_vlsseg4_mask:
1081     case Intrinsic::riscv_vlsseg5_mask:
1082     case Intrinsic::riscv_vlsseg6_mask:
1083     case Intrinsic::riscv_vlsseg7_mask:
1084     case Intrinsic::riscv_vlsseg8_mask: {
1085       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1086       return;
1087     }
1088     case Intrinsic::riscv_vloxseg2:
1089     case Intrinsic::riscv_vloxseg3:
1090     case Intrinsic::riscv_vloxseg4:
1091     case Intrinsic::riscv_vloxseg5:
1092     case Intrinsic::riscv_vloxseg6:
1093     case Intrinsic::riscv_vloxseg7:
1094     case Intrinsic::riscv_vloxseg8:
1095       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1096       return;
1097     case Intrinsic::riscv_vluxseg2:
1098     case Intrinsic::riscv_vluxseg3:
1099     case Intrinsic::riscv_vluxseg4:
1100     case Intrinsic::riscv_vluxseg5:
1101     case Intrinsic::riscv_vluxseg6:
1102     case Intrinsic::riscv_vluxseg7:
1103     case Intrinsic::riscv_vluxseg8:
1104       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1105       return;
1106     case Intrinsic::riscv_vloxseg2_mask:
1107     case Intrinsic::riscv_vloxseg3_mask:
1108     case Intrinsic::riscv_vloxseg4_mask:
1109     case Intrinsic::riscv_vloxseg5_mask:
1110     case Intrinsic::riscv_vloxseg6_mask:
1111     case Intrinsic::riscv_vloxseg7_mask:
1112     case Intrinsic::riscv_vloxseg8_mask:
1113       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1114       return;
1115     case Intrinsic::riscv_vluxseg2_mask:
1116     case Intrinsic::riscv_vluxseg3_mask:
1117     case Intrinsic::riscv_vluxseg4_mask:
1118     case Intrinsic::riscv_vluxseg5_mask:
1119     case Intrinsic::riscv_vluxseg6_mask:
1120     case Intrinsic::riscv_vluxseg7_mask:
1121     case Intrinsic::riscv_vluxseg8_mask:
1122       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1123       return;
1124     case Intrinsic::riscv_vlseg8ff:
1125     case Intrinsic::riscv_vlseg7ff:
1126     case Intrinsic::riscv_vlseg6ff:
1127     case Intrinsic::riscv_vlseg5ff:
1128     case Intrinsic::riscv_vlseg4ff:
1129     case Intrinsic::riscv_vlseg3ff:
1130     case Intrinsic::riscv_vlseg2ff: {
1131       selectVLSEGFF(Node, /*IsMasked*/ false);
1132       return;
1133     }
1134     case Intrinsic::riscv_vlseg8ff_mask:
1135     case Intrinsic::riscv_vlseg7ff_mask:
1136     case Intrinsic::riscv_vlseg6ff_mask:
1137     case Intrinsic::riscv_vlseg5ff_mask:
1138     case Intrinsic::riscv_vlseg4ff_mask:
1139     case Intrinsic::riscv_vlseg3ff_mask:
1140     case Intrinsic::riscv_vlseg2ff_mask: {
1141       selectVLSEGFF(Node, /*IsMasked*/ true);
1142       return;
1143     }
1144     case Intrinsic::riscv_vloxei:
1145     case Intrinsic::riscv_vloxei_mask:
1146     case Intrinsic::riscv_vluxei:
1147     case Intrinsic::riscv_vluxei_mask: {
1148       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1149                       IntNo == Intrinsic::riscv_vluxei_mask;
1150       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1151                        IntNo == Intrinsic::riscv_vloxei_mask;
1152 
1153       MVT VT = Node->getSimpleValueType(0);
1154       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1155 
1156       unsigned CurOp = 2;
1157       SmallVector<SDValue, 8> Operands;
1158       if (IsMasked)
1159         Operands.push_back(Node->getOperand(CurOp++));
1160 
1161       MVT IndexVT;
1162       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1163                                  /*IsStridedOrIndexed*/ true, Operands,
1164                                  /*IsLoad=*/true, &IndexVT);
1165 
1166       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1167              "Element count mismatch");
1168 
1169       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1170       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1171       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1172       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1173           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1174           static_cast<unsigned>(IndexLMUL));
1175       MachineSDNode *Load =
1176           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1177 
1178       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1179         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1180 
1181       ReplaceNode(Node, Load);
1182       return;
1183     }
1184     case Intrinsic::riscv_vlm:
1185     case Intrinsic::riscv_vle:
1186     case Intrinsic::riscv_vle_mask:
1187     case Intrinsic::riscv_vlse:
1188     case Intrinsic::riscv_vlse_mask: {
1189       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1190                       IntNo == Intrinsic::riscv_vlse_mask;
1191       bool IsStrided =
1192           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1193 
1194       MVT VT = Node->getSimpleValueType(0);
1195       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1196 
1197       unsigned CurOp = 2;
1198       SmallVector<SDValue, 8> Operands;
1199       if (IsMasked)
1200         Operands.push_back(Node->getOperand(CurOp++));
1201 
1202       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1203                                  Operands, /*IsLoad=*/true);
1204 
1205       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1206       const RISCV::VLEPseudo *P =
1207           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1208                               static_cast<unsigned>(LMUL));
1209       MachineSDNode *Load =
1210           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1211 
1212       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1213         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1214 
1215       ReplaceNode(Node, Load);
1216       return;
1217     }
1218     case Intrinsic::riscv_vleff:
1219     case Intrinsic::riscv_vleff_mask: {
1220       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1221 
1222       MVT VT = Node->getSimpleValueType(0);
1223       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1224 
1225       unsigned CurOp = 2;
1226       SmallVector<SDValue, 7> Operands;
1227       if (IsMasked)
1228         Operands.push_back(Node->getOperand(CurOp++));
1229 
1230       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1231                                  /*IsStridedOrIndexed*/ false, Operands,
1232                                  /*IsLoad=*/true);
1233 
1234       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1235       const RISCV::VLEPseudo *P =
1236           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1237                               static_cast<unsigned>(LMUL));
1238       MachineSDNode *Load =
1239           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1240                                  MVT::Other, MVT::Glue, Operands);
1241       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1242                                               /*Glue*/ SDValue(Load, 2));
1243 
1244       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1245         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1246 
1247       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1248       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1249       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
1250       CurDAG->RemoveDeadNode(Node);
1251       return;
1252     }
1253     }
1254     break;
1255   }
1256   case ISD::INTRINSIC_VOID: {
1257     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1258     switch (IntNo) {
1259     case Intrinsic::riscv_vsseg2:
1260     case Intrinsic::riscv_vsseg3:
1261     case Intrinsic::riscv_vsseg4:
1262     case Intrinsic::riscv_vsseg5:
1263     case Intrinsic::riscv_vsseg6:
1264     case Intrinsic::riscv_vsseg7:
1265     case Intrinsic::riscv_vsseg8: {
1266       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1267       return;
1268     }
1269     case Intrinsic::riscv_vsseg2_mask:
1270     case Intrinsic::riscv_vsseg3_mask:
1271     case Intrinsic::riscv_vsseg4_mask:
1272     case Intrinsic::riscv_vsseg5_mask:
1273     case Intrinsic::riscv_vsseg6_mask:
1274     case Intrinsic::riscv_vsseg7_mask:
1275     case Intrinsic::riscv_vsseg8_mask: {
1276       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1277       return;
1278     }
1279     case Intrinsic::riscv_vssseg2:
1280     case Intrinsic::riscv_vssseg3:
1281     case Intrinsic::riscv_vssseg4:
1282     case Intrinsic::riscv_vssseg5:
1283     case Intrinsic::riscv_vssseg6:
1284     case Intrinsic::riscv_vssseg7:
1285     case Intrinsic::riscv_vssseg8: {
1286       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1287       return;
1288     }
1289     case Intrinsic::riscv_vssseg2_mask:
1290     case Intrinsic::riscv_vssseg3_mask:
1291     case Intrinsic::riscv_vssseg4_mask:
1292     case Intrinsic::riscv_vssseg5_mask:
1293     case Intrinsic::riscv_vssseg6_mask:
1294     case Intrinsic::riscv_vssseg7_mask:
1295     case Intrinsic::riscv_vssseg8_mask: {
1296       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1297       return;
1298     }
1299     case Intrinsic::riscv_vsoxseg2:
1300     case Intrinsic::riscv_vsoxseg3:
1301     case Intrinsic::riscv_vsoxseg4:
1302     case Intrinsic::riscv_vsoxseg5:
1303     case Intrinsic::riscv_vsoxseg6:
1304     case Intrinsic::riscv_vsoxseg7:
1305     case Intrinsic::riscv_vsoxseg8:
1306       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1307       return;
1308     case Intrinsic::riscv_vsuxseg2:
1309     case Intrinsic::riscv_vsuxseg3:
1310     case Intrinsic::riscv_vsuxseg4:
1311     case Intrinsic::riscv_vsuxseg5:
1312     case Intrinsic::riscv_vsuxseg6:
1313     case Intrinsic::riscv_vsuxseg7:
1314     case Intrinsic::riscv_vsuxseg8:
1315       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1316       return;
1317     case Intrinsic::riscv_vsoxseg2_mask:
1318     case Intrinsic::riscv_vsoxseg3_mask:
1319     case Intrinsic::riscv_vsoxseg4_mask:
1320     case Intrinsic::riscv_vsoxseg5_mask:
1321     case Intrinsic::riscv_vsoxseg6_mask:
1322     case Intrinsic::riscv_vsoxseg7_mask:
1323     case Intrinsic::riscv_vsoxseg8_mask:
1324       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1325       return;
1326     case Intrinsic::riscv_vsuxseg2_mask:
1327     case Intrinsic::riscv_vsuxseg3_mask:
1328     case Intrinsic::riscv_vsuxseg4_mask:
1329     case Intrinsic::riscv_vsuxseg5_mask:
1330     case Intrinsic::riscv_vsuxseg6_mask:
1331     case Intrinsic::riscv_vsuxseg7_mask:
1332     case Intrinsic::riscv_vsuxseg8_mask:
1333       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1334       return;
1335     case Intrinsic::riscv_vsoxei:
1336     case Intrinsic::riscv_vsoxei_mask:
1337     case Intrinsic::riscv_vsuxei:
1338     case Intrinsic::riscv_vsuxei_mask: {
1339       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1340                       IntNo == Intrinsic::riscv_vsuxei_mask;
1341       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1342                        IntNo == Intrinsic::riscv_vsoxei_mask;
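      // Ordered indexed stores (vsoxei) must write elements in index order,
      // e.g. so overlapping stores are observed in order; the unordered forms
      // (vsuxei) let the implementation reorder them.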
1343 
1344       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1345       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1346 
1347       unsigned CurOp = 2;
1348       SmallVector<SDValue, 8> Operands;
1349       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1350 
1351       MVT IndexVT;
1352       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1353                                  /*IsStridedOrIndexed*/ true, Operands,
1354                                  /*IsLoad=*/false, &IndexVT);
1355 
1356       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1357              "Element count mismatch");
1358 
1359       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1360       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1361       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1362       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1363           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1364           static_cast<unsigned>(IndexLMUL));
1365       MachineSDNode *Store =
1366           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1367 
1368       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1369         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1370 
1371       ReplaceNode(Node, Store);
1372       return;
1373     }
1374     case Intrinsic::riscv_vsm:
1375     case Intrinsic::riscv_vse:
1376     case Intrinsic::riscv_vse_mask:
1377     case Intrinsic::riscv_vsse:
1378     case Intrinsic::riscv_vsse_mask: {
1379       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1380                       IntNo == Intrinsic::riscv_vsse_mask;
1381       bool IsStrided =
1382           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
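      // vse is the unit-stride store, vsse takes an explicit byte stride, and
      // vsm stores a single mask register; all are handled uniformly here.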
1383 
1384       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1385       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1386 
1387       unsigned CurOp = 2;
1388       SmallVector<SDValue, 8> Operands;
1389       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1390 
1391       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1392                                  Operands);
1393 
1394       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1395       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1396           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1397       MachineSDNode *Store =
1398           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1399       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1400         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1401 
1402       ReplaceNode(Node, Store);
1403       return;
1404     }
1405     }
1406     break;
1407   }
1408   case ISD::BITCAST: {
1409     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1410     // Just drop bitcasts between vectors if both are fixed or both are
1411     // scalable.
1412     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1413         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1414       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1415       CurDAG->RemoveDeadNode(Node);
1416       return;
1417     }
1418     break;
1419   }
1420   case ISD::INSERT_SUBVECTOR: {
1421     SDValue V = Node->getOperand(0);
1422     SDValue SubV = Node->getOperand(1);
1423     SDLoc DL(SubV);
1424     auto Idx = Node->getConstantOperandVal(2);
1425     MVT SubVecVT = SubV.getSimpleValueType();
1426 
1427     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1428     MVT SubVecContainerVT = SubVecVT;
1429     // Establish the correct scalable-vector types for any fixed-length type.
1430     if (SubVecVT.isFixedLengthVector())
1431       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1432     if (VT.isFixedLengthVector())
1433       VT = TLI.getContainerForFixedLengthVector(VT);
1434 
1435     const auto *TRI = Subtarget->getRegisterInfo();
1436     unsigned SubRegIdx;
1437     std::tie(SubRegIdx, Idx) =
1438         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1439             VT, SubVecContainerVT, Idx, TRI);
1440 
1441     // If the Idx hasn't been completely eliminated, then this is a subvector
1442     // insert that doesn't naturally align to a vector register. These must
1443     // be handled using instructions to manipulate the vector registers.
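    // (For example, an insert whose start offset lines up with a vector
    // register boundary folds entirely into SubRegIdx, leaving Idx == 0;
    // anything else falls through to generic selection.)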
1444     if (Idx != 0)
1445       break;
1446 
1447     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1448     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1449                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1450                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1451     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1452     assert((!IsSubVecPartReg || V.isUndef()) &&
1453            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1454            "the subvector is smaller than a full-sized register");
1455 
1456     // If we haven't set a SubRegIdx, then we must be going between
1457     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1458     if (SubRegIdx == RISCV::NoSubRegister) {
1459       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1460       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1461                  InRegClassID &&
1462              "Unexpected subvector insertion");
1463       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1464       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1465                                                DL, VT, SubV, RC);
1466       ReplaceNode(Node, NewNode);
1467       return;
1468     }
1469 
1470     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1471     ReplaceNode(Node, Insert.getNode());
1472     return;
1473   }
1474   case ISD::EXTRACT_SUBVECTOR: {
1475     SDValue V = Node->getOperand(0);
1476     auto Idx = Node->getConstantOperandVal(1);
1477     MVT InVT = V.getSimpleValueType();
1478     SDLoc DL(V);
1479 
1480     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1481     MVT SubVecContainerVT = VT;
1482     // Establish the correct scalable-vector types for any fixed-length type.
1483     if (VT.isFixedLengthVector())
1484       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1485     if (InVT.isFixedLengthVector())
1486       InVT = TLI.getContainerForFixedLengthVector(InVT);
1487 
1488     const auto *TRI = Subtarget->getRegisterInfo();
1489     unsigned SubRegIdx;
1490     std::tie(SubRegIdx, Idx) =
1491         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1492             InVT, SubVecContainerVT, Idx, TRI);
1493 
1494     // If the Idx hasn't been completely eliminated, then this is a subvector
1495     // extract that doesn't naturally align to a vector register. These must
1496     // be handled using instructions to manipulate the vector registers.
1497     if (Idx != 0)
1498       break;
1499 
1500     // If we haven't set a SubRegIdx, then we must be going between
1501     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1502     if (SubRegIdx == RISCV::NoSubRegister) {
1503       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1504       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1505                  InRegClassID &&
1506              "Unexpected subvector extraction");
1507       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1508       SDNode *NewNode =
1509           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1510       ReplaceNode(Node, NewNode);
1511       return;
1512     }
1513 
1514     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1515     ReplaceNode(Node, Extract.getNode());
1516     return;
1517   }
1518   case ISD::SPLAT_VECTOR:
1519   case RISCVISD::VMV_V_X_VL:
1520   case RISCVISD::VFMV_V_F_VL: {
1521     // Try to match splat of a scalar load to a strided load with stride of x0.
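    // For example (illustrative): splatting a scalar loaded from (a0) can be
    // selected as "vlse64.v v8, (a0), zero", where the x0 stride makes every
    // element read the same scalar address.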
1522     SDValue Src = Node->getOperand(0);
1523     auto *Ld = dyn_cast<LoadSDNode>(Src);
1524     if (!Ld)
1525       break;
1526     EVT MemVT = Ld->getMemoryVT();
1527     // The memory VT should be the same size as the element type.
1528     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1529       break;
1530     if (!IsProfitableToFold(Src, Node, Node) ||
1531         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1532       break;
1533 
1534     SDValue VL;
1535     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
1536       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1537     else
1538       selectVLOp(Node->getOperand(1), VL);
1539 
1540     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1541     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1542 
1543     SDValue Operands[] = {Ld->getBasePtr(),
1544                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1545                           Ld->getChain()};
1546 
1547     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1548     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1549         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1550         static_cast<unsigned>(LMUL));
1551     MachineSDNode *Load =
1552         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1553 
1554     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1555       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1556 
1557     ReplaceNode(Node, Load);
1558     return;
1559   }
1560   }
1561 
1562   // Select the default instruction.
1563   SelectCode(Node);
1564 }
1565 
1566 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1567     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1568   switch (ConstraintID) {
1569   case InlineAsm::Constraint_m:
1570     // We only support simple memory operands that have a single address
1571     // operand and need no special handling.
1572     OutOps.push_back(Op);
1573     return false;
1574   case InlineAsm::Constraint_A:
1575     OutOps.push_back(Op);
1576     return false;
1577   default:
1578     break;
1579   }
1580 
1581   return true;
1582 }
1583 
1584 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1585   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1586     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1587     return true;
1588   }
1589   return false;
1590 }
1591 
1592 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1593   // If this is FrameIndex, select it directly. Otherwise just let it get
1594   // selected to a register independently.
1595   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1596     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1597   else
1598     Base = Addr;
1599   return true;
1600 }
1601 
1602 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1603                                         SDValue &ShAmt) {
1604   // Shift instructions on RISCV read only the lower 5 (32-bit shifts) or 6
1605   // (64-bit shifts) bits of the shift amount. If there is an AND on the
1606   // shift amount, we can bypass it if it doesn't affect any of those bits.
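  // For example (a sketch): with a 32-bit shift width, the AND in
  // (srlw x, (and y, 31)) is redundant, since SRLW already reads only y[4:0],
  // so the shift amount can be selected as just y.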
1607   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1608     const APInt &AndMask = N->getConstantOperandAPInt(1);
1609 
1610     // Since the max shift amount is a power of 2, we can subtract 1 to make
1611     // a mask that covers the bits needed to represent all shift amounts.
1612     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1613     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1614 
1615     if (ShMask.isSubsetOf(AndMask)) {
1616       ShAmt = N.getOperand(0);
1617       return true;
1618     }
1619 
1620     // SimplifyDemandedBits may have optimized the mask, so try restoring any
1621     // bits that are known zero.
1622     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1623     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1624       ShAmt = N.getOperand(0);
1625       return true;
1626     }
1627   }
1628 
1629   ShAmt = N;
1630   return true;
1631 }
1632 
1633 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1634   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1635       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1636     Val = N.getOperand(0);
1637     return true;
1638   }
1639   MVT VT = N.getSimpleValueType();
1640   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1641     Val = N;
1642     return true;
1643   }
1644 
1645   return false;
1646 }
1647 
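// Similar in spirit to selectSExti32 above (which accepts (sext_inreg x, i32)
// or any x with at least 33 known sign bits), selectZExti32 below accepts
// (and x, 0xffffffff) or any x whose upper 32 bits are known to be zero.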
1648 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1649   if (N.getOpcode() == ISD::AND) {
1650     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1651     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1652       Val = N.getOperand(0);
1653       return true;
1654     }
1655   }
1656   MVT VT = N.getSimpleValueType();
1657   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1658   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1659     Val = N;
1660     return true;
1661   }
1662 
1663   return false;
1664 }
1665 
1666 // Return true if all users of this SDNode* only consume the lower \p Bits.
1667 // This can be used to form W instructions for add/sub/mul/shl even when the
1668 // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
1669 // SimplifyDemandedBits has made it so some users see a sext_inreg and some
1670 // don't. The sext_inreg+add/sub/mul/shl gets selected as a W instruction,
1671 // but that still leaves the bare add/sub/mul/shl to become a non-W
1672 // instruction. By checking the users, we may be able to use a W instruction
1673 // here too and CSE with the other one. We could try to detect that the CSE
1674 // opportunity exists before doing this, but that would be more complicated.
1675 // TODO: Does this need to look through AND/OR/XOR to their users to find
1676 // more opportunities?
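// For example, if an (add x, y) is consumed only by an ADDW and by the value
// operand of an SW store, every user reads at most the low 32 bits, so the
// add itself can safely be selected as ADDW.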
1677 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
1678   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
1679           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
1680           Node->getOpcode() == ISD::SRL ||
1681           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
1682           isa<ConstantSDNode>(Node)) &&
1683          "Unexpected opcode");
1684 
1685   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
1686     SDNode *User = *UI;
1687     // Users of this node should have already been instruction selected
1688     // Users of this node should have already been instruction selected.
1689       return false;
1690 
1691     // TODO: Add more opcodes?
1692     switch (User->getMachineOpcode()) {
1693     default:
1694       return false;
1695     case RISCV::ADDW:
1696     case RISCV::ADDIW:
1697     case RISCV::SUBW:
1698     case RISCV::MULW:
1699     case RISCV::SLLW:
1700     case RISCV::SLLIW:
1701     case RISCV::SRAW:
1702     case RISCV::SRAIW:
1703     case RISCV::SRLW:
1704     case RISCV::SRLIW:
1705     case RISCV::DIVW:
1706     case RISCV::DIVUW:
1707     case RISCV::REMW:
1708     case RISCV::REMUW:
1709     case RISCV::ROLW:
1710     case RISCV::RORW:
1711     case RISCV::RORIW:
1712     case RISCV::CLZW:
1713     case RISCV::CTZW:
1714     case RISCV::CPOPW:
1715     case RISCV::SLLIUW:
1716     case RISCV::FCVT_H_W:
1717     case RISCV::FCVT_H_WU:
1718     case RISCV::FCVT_S_W:
1719     case RISCV::FCVT_S_WU:
1720     case RISCV::FCVT_D_W:
1721     case RISCV::FCVT_D_WU:
1722       if (Bits < 32)
1723         return false;
1724       break;
1725     case RISCV::SLLI:
1726       // SLLI only uses the lower (XLen - ShAmt) bits.
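      // (E.g. on RV64, an SLLI by 48 reads only the low 16 bits of its input.)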
1727       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
1728         return false;
1729       break;
1730     case RISCV::ADDUW:
1731     case RISCV::SH1ADDUW:
1732     case RISCV::SH2ADDUW:
1733     case RISCV::SH3ADDUW:
1734       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
1735       // 32 bits.
1736       if (UI.getOperandNo() != 0 || Bits < 32)
1737         return false;
1738       break;
1739     case RISCV::SB:
1740       if (UI.getOperandNo() != 0 || Bits < 8)
1741         return false;
1742       break;
1743     case RISCV::SH:
1744       if (UI.getOperandNo() != 0 || Bits < 16)
1745         return false;
1746       break;
1747     case RISCV::SW:
1748       if (UI.getOperandNo() != 0 || Bits < 32)
1749         return false;
1750       break;
1751     }
1752   }
1753 
1754   return true;
1755 }
1756 
1757 // Select VL as a 5-bit immediate or a value that will become a register.
1758 // This allows us to choose between VSETIVLI and VSETVLI later.
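// For example, a constant AVL of 17 becomes a 5-bit target constant that can
// later be selected as "vsetivli rd, 17, ..."; larger or non-constant AVLs
// stay as a register operand for "vsetvli".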
1759 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1760   auto *C = dyn_cast<ConstantSDNode>(N);
1761   if (C && isUInt<5>(C->getZExtValue()))
1762     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1763                                    N->getValueType(0));
1764   else
1765     VL = N;
1766 
1767   return true;
1768 }
1769 
1770 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1771   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1772       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1773       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1774     return false;
1775   SplatVal = N.getOperand(0);
1776   return true;
1777 }
1778 
1779 using ValidateFn = bool (*)(int64_t);
1780 
1781 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1782                                    SelectionDAG &DAG,
1783                                    const RISCVSubtarget &Subtarget,
1784                                    ValidateFn ValidateImm) {
1785   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1786        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1787        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1788       !isa<ConstantSDNode>(N.getOperand(0)))
1789     return false;
1790 
1791   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1792 
1793   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1794   // share semantics when the operand type is wider than the resulting vector
1795   // element type: an implicit truncation first takes place. Therefore, perform
1796   // a manual truncation/sign-extension in order to ignore any truncated bits
1797   // and catch any zero-extended immediate.
1798   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1799   // sign-extending to (XLenVT -1).
1800   MVT XLenVT = Subtarget.getXLenVT();
1801   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1802          "Unexpected splat operand type");
1803   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1804   if (EltVT.bitsLT(XLenVT))
1805     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1806 
1807   if (!ValidateImm(SplatImm))
1808     return false;
1809 
1810   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1811   return true;
1812 }
1813 
1814 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1815   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1816                                 [](int64_t Imm) { return isInt<5>(Imm); });
1817 }
1818 
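// The "Plus1" helpers below accept the simm5 range shifted up by one,
// [-15, 16]: a sketch of the intent is that the matching pattern encodes
// Imm - 1 (e.g. rewriting x < imm as x <= imm - 1), which still fits simm5.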
1819 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1820   return selectVSplatSimmHelper(
1821       N, SplatVal, *CurDAG, *Subtarget,
1822       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1823 }
1824 
1825 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1826                                                       SDValue &SplatVal) {
1827   return selectVSplatSimmHelper(
1828       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1829         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1830       });
1831 }
1832 
1833 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1834   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1835        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1836        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1837       !isa<ConstantSDNode>(N.getOperand(0)))
1838     return false;
1839 
1840   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1841 
1842   if (!isUInt<5>(SplatImm))
1843     return false;
1844 
1845   SplatVal =
1846       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1847 
1848   return true;
1849 }
1850 
1851 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1852                                        SDValue &Imm) {
1853   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1854     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1855 
1856     if (!isInt<5>(ImmVal))
1857       return false;
1858 
1859     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1860     return true;
1861   }
1862 
1863   return false;
1864 }
1865 
1866 // Merge an ADDI into the offset of a load/store instruction where possible.
1867 // (load (addi base, off1), off2) -> (load base, off1+off2)
1868 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1869 // This is possible when off1+off2 fits a 12-bit immediate.
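// For example (illustrative): "addi a1, a0, 32" followed by "ld a2, 8(a1)"
// folds to "ld a2, 40(a0)"; the ADDI itself is deleted later if it becomes
// dead.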
1870 bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
1871   int OffsetOpIdx;
1872   int BaseOpIdx;
1873 
1874   // Only attempt this optimization for I-type loads and S-type stores.
1875   switch (N->getMachineOpcode()) {
1876   default:
1877     return false;
1878   case RISCV::LB:
1879   case RISCV::LH:
1880   case RISCV::LW:
1881   case RISCV::LBU:
1882   case RISCV::LHU:
1883   case RISCV::LWU:
1884   case RISCV::LD:
1885   case RISCV::FLH:
1886   case RISCV::FLW:
1887   case RISCV::FLD:
1888     BaseOpIdx = 0;
1889     OffsetOpIdx = 1;
1890     break;
1891   case RISCV::SB:
1892   case RISCV::SH:
1893   case RISCV::SW:
1894   case RISCV::SD:
1895   case RISCV::FSH:
1896   case RISCV::FSW:
1897   case RISCV::FSD:
1898     BaseOpIdx = 1;
1899     OffsetOpIdx = 2;
1900     break;
1901   }
1902 
1903   if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1904     return false;
1905 
1906   SDValue Base = N->getOperand(BaseOpIdx);
1907 
1908   // If the base is an ADDI, we can merge it in to the load/store.
1909   if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1910     return false;
1911 
1912   SDValue ImmOperand = Base.getOperand(1);
1913   uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1914 
1915   if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1916     int64_t Offset1 = Const->getSExtValue();
1917     int64_t CombinedOffset = Offset1 + Offset2;
1918     if (!isInt<12>(CombinedOffset))
1919       return false;
1920     ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1921                                            ImmOperand.getValueType());
1922   } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1923     // If the off1 in (addi base, off1) is a global variable's address (its
1924     // low part, really), then we can rely on the alignment of that variable
1925     // to provide a margin of safety before off1 can overflow the 12 bits.
1926     // Check if off2 falls within that margin; if so off1+off2 can't overflow.
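    // For instance, for a global with 8-byte alignment, the low-part addend
    // is a multiple of 8 and so at most 2040 in magnitude on the positive
    // side, meaning any off2 in [1, 7] keeps off1+off2 within signed 12 bits.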
1927     const DataLayout &DL = CurDAG->getDataLayout();
1928     Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1929     if (Offset2 != 0 && Alignment <= Offset2)
1930       return false;
1931     int64_t Offset1 = GA->getOffset();
1932     int64_t CombinedOffset = Offset1 + Offset2;
1933     ImmOperand = CurDAG->getTargetGlobalAddress(
1934         GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1935         CombinedOffset, GA->getTargetFlags());
1936   } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1937     // Ditto.
1938     Align Alignment = CP->getAlign();
1939     if (Offset2 != 0 && Alignment <= Offset2)
1940       return false;
1941     int64_t Offset1 = CP->getOffset();
1942     int64_t CombinedOffset = Offset1 + Offset2;
1943     ImmOperand = CurDAG->getTargetConstantPool(
1944         CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1945         CombinedOffset, CP->getTargetFlags());
1946   } else {
1947     return false;
1948   }
1949 
1950   LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
1951   LLVM_DEBUG(Base->dump(CurDAG));
1952   LLVM_DEBUG(dbgs() << "\nN: ");
1953   LLVM_DEBUG(N->dump(CurDAG));
1954   LLVM_DEBUG(dbgs() << "\n");
1955 
1956   // Modify the offset operand of the load/store.
1957   if (BaseOpIdx == 0) // Load
1958     CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1959                                N->getOperand(2));
1960   else // Store
1961     CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1962                                ImmOperand, N->getOperand(3));
1963 
1964   return true;
1965 }
1966 
1967 // Try to remove sext.w if the input is a W instruction or can be made into
1968 // a W instruction cheaply.
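// For example, (ADDIW (ADD x, y), 0) can be replaced by a single
// (ADDW x, y), and an input that is already a W instruction (ADDW, SUBW,
// etc.) needs no sext.w at all.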
1969 bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
1970   // Look for the sext.w pattern, addiw rd, rs1, 0.
1971   if (N->getMachineOpcode() != RISCV::ADDIW ||
1972       !isNullConstant(N->getOperand(1)))
1973     return false;
1974 
1975   SDValue N0 = N->getOperand(0);
1976   if (!N0.isMachineOpcode())
1977     return false;
1978 
1979   switch (N0.getMachineOpcode()) {
1980   default:
1981     break;
1982   case RISCV::ADD:
1983   case RISCV::ADDI:
1984   case RISCV::SUB:
1985   case RISCV::MUL:
1986   case RISCV::SLLI: {
1987     // Convert sext.w+add/addi/sub/mul/slli to their W instructions. This
1988     // creates a new, independent instruction, which improves latency.
1989     unsigned Opc;
1990     switch (N0.getMachineOpcode()) {
1991     default:
1992       llvm_unreachable("Unexpected opcode!");
1993     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
1994     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
1995     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
1996     case RISCV::MUL:  Opc = RISCV::MULW;  break;
1997     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
1998     }
1999 
2000     SDValue N00 = N0.getOperand(0);
2001     SDValue N01 = N0.getOperand(1);
2002 
2003     // Shift amount needs to be uimm5.
2004     if (N0.getMachineOpcode() == RISCV::SLLI &&
2005         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2006       break;
2007 
2008     SDNode *Result =
2009         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2010                                N00, N01);
2011     ReplaceUses(N, Result);
2012     return true;
2013   }
2014   case RISCV::ADDW:
2015   case RISCV::ADDIW:
2016   case RISCV::SUBW:
2017   case RISCV::MULW:
2018   case RISCV::SLLIW:
2019     // Result is already sign extended; just remove the sext.w.
2020     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2021     ReplaceUses(N, N0.getNode());
2022     return true;
2023   }
2024 
2025   return false;
2026 }
2027 
2028 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
2029 // for instruction scheduling.
2030 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
2031   return new RISCVDAGToDAGISel(TM);
2032 }
2033