//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
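  /// For example, (select_cc x, y, setlt, a, b) evaluates to a if x < y, and
  /// to b otherwise.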
  SELECT_CC,
  BR_CC,
  BuildPairF64,
  SplitF64,
  TAIL,
  // Multiply high for signed x unsigned.
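  // That is, the upper XLEN bits of the 2*XLEN-bit product of a sign-extended
  // first operand and a zero-extended second operand.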
  MULHSU,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  CLZW,
  CTZW,
  // RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
  // instructions, but the same operand order as fshl/fshr intrinsics.
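  // For reference, llvm.fshl(a, b, shamt) concatenates a (as the more
  // significant half) with b and returns the upper half of the double-width
  // value shifted left by shamt modulo the bit width.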
  FSR,
  FSL,
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions,
  // but the same operand order as fshl/fshr intrinsics.
  FSRW,
  FSLW,
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These are more convenient semantics for producing DAG combines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a control operand and outputs a
  // bit-manipulated version of the input. All operands are i32 or XLenVT.
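  // For example, on a 32-bit value, (GREV x, 24) reverses the bytes of x (a
  // byte swap) and (GREV x, 31) reverses all 32 bits.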
  GREV,
  GREVW,
  GORC,
  GORCW,
  SHFL,
  SHFLW,
  UNSHFL,
  UNSHFLW,
  // Bit Compress/Decompress implement the generic bit extract and bit deposit
  // functions. This operation is also referred to as bit gather/scatter, bit
  // pack/unpack, parallel extract/deposit, compress/expand, or right
  // compress/right expand.
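  // For example, a bit compress with mask 0b0101 extracts bits 0 and 2 of the
  // source and packs them into bits 0 and 1 of the result; bit decompress
  // performs the inverse deposit.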
  BCOMPRESS,
  BCOMPRESSW,
  BDECOMPRESS,
  BDECOMPRESSW,
  // Vector Extension
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra
  // operand for the VL value to be used for the operation.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats an i64 scalar to a vector type (with element type i64) where the
  // scalar is a sign-extended i32.
  SPLAT_VECTOR_I64,
  // Read VLENB CSR
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
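  // For example, nxv4i32 can be truncated to nxv4i16 in one step; truncating
  // to nxv4i8 takes two.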
  TRUNCATE_VECTOR_VL,
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask,
  // and the fifth is the VL.
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // source vector, the second is the XLenVT scalar value. The third and fourth
  // operands are the mask and VL operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod instruction (convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the first vector operand plus the first element of the
  // second vector operand. The first operand is an unconstrained vector type,
  // and the result and second operand's types are expected to be the
  // corresponding full-width LMUL=1 type for the first operand:
  //   nxv8i8 = vecreduce_add nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but it
  // also reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
  // FIXME: Can we replace these with ISD::VP_*?
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,
  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  FMA_VL,
  FCOPYSIGN_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,
  FMINNUM_VL,
  FMAXNUM_VL,
  MULHS_VL,
  MULHU_VL,
  FP_TO_SINT_VL,
  FP_TO_UINT_VL,
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,

  // Matches the semantics of vrgather.vx and vrgather.vv with an extra operand
  // for VL.
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,
  // vpopc.m with additional mask and VL operands.
  VPOPC_VL,

  // Reads the value of a CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced, the read value and the new
  // chain pointer.
  READ_CSR,
  // Writes a value to a CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. The result is the
  // new chain pointer.
  WRITE_CSR,
  // Reads and writes the value of a CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. Two results are
  // produced, the value read before the modification and the new chain
  // pointer.
  SWAP_CSR,

  // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VSE_VL,

  // WARNING: Do not add anything after this point unless you want the node to
  // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
  // opcodes will be treated as target memory ops!
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of gpr sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of gpr sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(EVT VT) const override;

private:
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        bool IsRet) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;
};

namespace RISCV {
// We use 64 bits as the known part in the scalable vector types.
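// In other words, vscale for a given implementation is VLEN / RVVBitsPerBlock,
// so an MVT such as nxv1i64 corresponds to exactly one 64-bit block.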
static constexpr unsigned RVVBitsPerBlock = 64;
} // namespace RISCV

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID; // ID of the llvm.riscv.* intrinsic.
  uint8_t SplatOperand; // Operand holding a scalar that is splatted to a vector.
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"
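// The include above declares a TableGen-generated lookup helper; callers
// (e.g. in RISCVISelLowering.cpp) would typically query the table as:
//   const RISCVVIntrinsicInfo *II = getRISCVVIntrinsicInfo(IntrinsicID);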

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif