//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and VL patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the VL patterns.
//===----------------------------------------------------------------------===//

// Unary int op. Operands: (src, passthru, mask, vl) -- two vectors of the
// result type, an i1 mask with matching element count, and an XLenVT VL.
def SDT_RISCVIntUnOp_VL : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisInt<0>,
                                               SDTCVecEltisVT<3, i1>,
                                               SDTCisSameNumEltsAs<0, 3>,
                                               SDTCisVT<4, XLenVT>]>;

// Binary int op. Operands: (lhs, rhs, passthru, mask, vl) -- three vectors of
// the result type, an i1 mask with matching element count, and an XLenVT VL.
def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>, SDTCisInt<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

// Input: (vector, vector/scalar, passthru, mask, roundmode, vl)
// Operand 1 is the wide source; operand 2 and the result are the narrow type.
def SDT_RISCVVNBinOp_RM_VL : SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisVec<1>,
                                                  SDTCisOpSmallerThanOp<2, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCisSameNumEltsAs<0, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>,
                                                  SDTCisVT<6, XLenVT>]>;

// Unary FP op. Operands: (src, mask, vl). Note: no passthru operand.
def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<2, i1>,
                                              SDTCisSameNumEltsAs<0, 2>,
                                              SDTCisVT<3, XLenVT>]>;
// Binary FP op. Operands: (lhs, rhs, passthru, mask, vl).
def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;

// Same operand layout as SDT_RISCVFPBinOp_VL; kept as a distinct profile for
// FCOPYSIGN_VL.
def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisVec<0>, SDTCisFP<0>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCVecEltisVT<4, i1>,
                                                SDTCisSameNumEltsAs<0, 4>,
                                                SDTCisVT<5, XLenVT>]>;

// Splat / scalar-move nodes. Operand layout per the profile below:
// vmv.v.v: (passthru, src, vl); vmv.v.x/vfmv.v.f: (passthru, scalar, vl);
// vmv.s.x/vfmv.s.f: (passthru, scalar, vl).
def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisSameAs<0, 2>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
                              SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
                               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
                                                    SDTCisSameAs<0, 1>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;
def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
                              SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                   SDTCisInt<0>,
                                                   SDTCisVT<2, XLenVT>,
                                                   SDTCisVT<3, XLenVT>]>>;
def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
                               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                    SDTCisFP<0>,
                                                    SDTCisEltOfVec<2, 0>,
                                                    SDTCisVT<3, XLenVT>]>>;

// VL-predicated integer binary ops. Commutative ops carry SDNPCommutative so
// DAGCombine/ISel may swap their first two operands.
def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_rotl_vl : SDNode<"RISCVISD::ROTL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_rotr_vl : SDNode<"RISCVISD::ROTR_VL", SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;

// VL-predicated integer unary (bit-manipulation/count) ops.
def riscv_bitreverse_vl : SDNode<"RISCVISD::BITREVERSE_VL", SDT_RISCVIntUnOp_VL>;
def riscv_bswap_vl : SDNode<"RISCVISD::BSWAP_VL", SDT_RISCVIntUnOp_VL>;
def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>;
def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>;

// VL-predicated averaging and saturating arithmetic.
def riscv_avgfloors_vl : SDNode<"RISCVISD::AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgceils_vl : SDNode<"RISCVISD::AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;

// VL-predicated FP arithmetic.
def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>;
def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
def riscv_vfmin_vl : SDNode<"RISCVISD::VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfmax_vl : SDNode<"RISCVISD::VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;

// Constrained (strict) FP variants: these carry a chain (SDNPHasChain) so the
// FP environment side effects are ordered.
def riscv_strict_fadd_vl : SDNode<"RISCVISD::STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

// "any_*" PatFrags match either the normal or the strict form of an op, so a
// single pattern can select both.
def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                 [(riscv_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                  (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                 [(riscv_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                  (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                 [(riscv_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                  (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                 [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl),
                                  (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                  [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                                   (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;

// vfclass: integer result vector with the same size/element count as the FP
// source.
def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
                             SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
                                                  SDTCisFP<1>, SDTCisVec<1>,
                                                  SDTCisSameSizeAs<0, 1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<0, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

// FMA. Operands: (rs1, rs2, rs3, mask, vl), all value operands of the result
// type.
def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                              SDTCisSameAs<0, 2>,
                                              SDTCisSameAs<0, 3>,
                                              SDTCisVec<0>, SDTCisFP<0>,
                                              SDTCVecEltisVT<4, i1>,
                                              SDTCisSameNumEltsAs<0, 4>,
                                              SDTCisVT<5, XLenVT>]>;
def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;

// Widening FMA: multiplicands (ops 1 and 2) are the narrow type, the addend
// (op 3) and the result are the wide type.
def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                               SDTCisVec<1>, SDTCisFP<1>,
                                               SDTCisOpSmallerThanOp<1, 0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 3>,
                                               SDTCVecEltisVT<4, i1>,
                                               SDTCisSameNumEltsAs<0, 4>,
                                               SDTCisVT<5, XLenVT>]>;
def riscv_vfwmadd_vl : SDNode<"RISCVISD::VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmadd_vl : SDNode<"RISCVISD::VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwmsub_vl : SDNode<"RISCVISD::VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;
def riscv_vfwnmsub_vl : SDNode<"RISCVISD::VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>;

def riscv_strict_vfmadd_vl : SDNode<"RISCVISD::STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmadd_vl : SDNode<"RISCVISD::STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfmsub_vl : SDNode<"RISCVISD::STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;
def riscv_strict_vfnmsub_vl : SDNode<"RISCVISD::STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>;

def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                   [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;
def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                    [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl),
                                     (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>;

// FP round/extend. Operands: (src, mask, vl); round narrows (result smaller
// than source), extend widens.
def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>;
def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
def riscv_strict_fncvt_rod_vl : SDNode<"RISCVISD::STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;

def any_riscv_fpround_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                    [(riscv_fpround_vl node:$src, node:$mask, node:$vl),
                                     (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fpextend_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                     [(riscv_fpextend_vl node:$src, node:$mask, node:$vl),
                                      (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fncvt_rod_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                      [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl),
                                       (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>;

// FP <-> int conversions; the _RM variants take an extra rounding-mode
// operand.
def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

// Compare. Operands: (lhs, rhs, cc, passthru, mask, vl); result and passthru
// are i1 mask vectors.
def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [
  SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
  SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>;

// Float -> Int
def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>;

def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>;

def riscv_strict_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;

def any_riscv_vfcvt_rm_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm),
                                         [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm),
                                          (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>;
def any_riscv_vfcvt_rtz_xu_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                           [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
                                            (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_vfcvt_rtz_x_f_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                          [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
                                           (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;

// Int -> Float
def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>;

def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;

def any_riscv_sint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_uint_to_fp_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                       [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
                                        (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;

def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
def riscv_strict_vfround_noexcept_vl: SDNode<"RISCVISD::STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

def any_riscv_vfround_noexcept_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                             [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl),
                                              (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>;

def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDT_RISCVSETCCOP_VL>;
def riscv_strict_fsetcc_vl : SDNode<"RISCVISD::STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl : SDNode<"RISCVISD::STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def any_riscv_fsetcc_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
                                   [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
                                    (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fsetccs_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
                                    [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl),
                                     (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc, node:$passthru, node:$mask, node:$vl)]>;

// Gathers. Operands: (src, index (scalar for VX, vector otherwise), passthru,
// mask, vl). vrgatherei16 constrains the index element type to i16.
def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisVT<2, XLenVT>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
                                  SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                       SDTCisSameAs<0, 1>,
                                                       SDTCisInt<2>,
                                                       SDTCisSameNumEltsAs<0, 2>,
                                                       SDTCisSameSizeAs<0, 2>,
                                                       SDTCisSameAs<0, 3>,
                                                       SDTCVecEltisVT<4, i1>,
                                                       SDTCisSameNumEltsAs<0, 4>,
                                                       SDTCisVT<5, XLenVT>]>>;
def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
                                      SDTypeProfile<1, 5, [SDTCisVec<0>,
                                                           SDTCisSameAs<0, 1>,
                                                           SDTCisInt<2>,
                                                           SDTCVecEltisVT<2, i16>,
                                                           SDTCisSameNumEltsAs<0, 2>,
                                                           SDTCisSameAs<0, 3>,
                                                           SDTCVecEltisVT<4, i1>,
                                                           SDTCisSameNumEltsAs<0, 4>,
                                                           SDTCisVT<5, XLenVT>]>>;

// Merge. Operands: (cond mask, true value, false value, passthru, vl).
def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
  SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_vmerge_vl : SDNode<"RISCVISD::VMERGE_VL", SDT_RISCVVMERGE_VL>;

// Mask set/clear: produce an all-ones/all-zeros i1 vector given only a VL.
def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
                                                SDTCisVT<1, XLenVT>]>;
def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;

// Mask-register logical ops. Operands: (lhs, rhs, vl); no mask or passthru.
def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameAs<0, 2>,
                                                 SDTCVecEltisVT<0, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;

// An all-ones mask: a VMSET_VL with any VL.
def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;

// Mask complement, expressed as XOR with the all-ones mask.
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
                             (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;

// Scalar-producing mask queries: population count and first-set-bit index.
def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
                            SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                 SDTCisVec<1>, SDTCisInt<1>,
                                                 SDTCVecEltisVT<2, i1>,
                                                 SDTCisSameNumEltsAs<1, 2>,
                                                 SDTCisVT<3, XLenVT>]>>;

def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL",
                             SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
                                                  SDTCisVec<1>, SDTCisInt<1>,
                                                  SDTCVecEltisVT<2, i1>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisVT<3, XLenVT>]>>;

// Sign/zero extension. Operands: (src, mask, vl).
def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
                                               SDTCisSameNumEltsAs<0, 1>,
                                               SDTCisSameNumEltsAs<1, 2>,
                                               SDTCVecEltisVT<2, i1>,
                                               SDTCisVT<3, XLenVT>]>;
def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;
// Matches either extension when the signedness does not matter.
def riscv_ext_vl : PatFrags<(ops node:$A, node:$B, node:$C),
                            [(riscv_sext_vl node:$A, node:$B, node:$C),
                             (riscv_zext_vl node:$A, node:$B, node:$C)]>;

// Truncation (plain and saturating). Operands: (src, mask, vl).
def SDT_RISCVVTRUNCATE_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                 SDTCisSameNumEltsAs<0, 1>,
                                                 SDTCisSameNumEltsAs<0, 2>,
                                                 SDTCVecEltisVT<2, i1>,
                                                 SDTCisVT<3, XLenVT>]>;
def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                   SDT_RISCVVTRUNCATE_VL>;
def riscv_trunc_vector_vl_ssat : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL_SSAT",
                                        SDT_RISCVVTRUNCATE_VL>;
def riscv_trunc_vector_vl_usat : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL_USAT",
                                        SDT_RISCVVTRUNCATE_VL>;

// Widening int binary op: both sources (ops 1, 2) are the narrow type; the
// result and passthru (op 3) are the wide type.
def SDT_RISCVVWIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                  SDTCisInt<1>,
                                                  SDTCisSameNumEltsAs<0, 1>,
                                                  SDTCisOpSmallerThanOp<1, 0>,
                                                  SDTCisSameAs<1, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;
def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>;
def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWIntBinOp_VL, [SDNPCommutative]>;
def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsll_vl : SDNode<"RISCVISD::VWSLL_VL", SDT_RISCVVWIntBinOp_VL, []>;

// Widening int multiply-accumulate; same operand layout as
// SDT_RISCVVWIntBinOp_VL with the wide accumulator in op 3.
def SDT_RISCVVWIntTernOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisInt<1>,
                                                   SDTCisSameNumEltsAs<0, 1>,
                                                   SDTCisOpSmallerThanOp<1, 0>,
                                                   SDTCisSameAs<1, 2>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwmacc_vl : SDNode<"RISCVISD::VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccu_vl : SDNode<"RISCVISD::VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
def riscv_vwmaccsu_vl : SDNode<"RISCVISD::VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>;

// Widening FP binary op: narrow sources, wide result/passthru.
def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                 SDTCisFP<1>,
                                                 SDTCisSameNumEltsAs<0, 1>,
                                                 SDTCisOpSmallerThanOp<1, 0>,
                                                 SDTCisSameAs<1, 2>,
                                                 SDTCisSameAs<0, 3>,
                                                 SDTCisSameNumEltsAs<1, 4>,
                                                 SDTCVecEltisVT<4, i1>,
                                                 SDTCisVT<5, XLenVT>]>;
def riscv_vfwmul_vl : SDNode<"RISCVISD::VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwadd_vl : SDNode<"RISCVISD::VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>;
def riscv_vfwsub_vl : SDNode<"RISCVISD::VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>;

// ".w" form: op 1 is already wide, op 2 is narrow.
def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisInt<2>,
                                                   SDTCisSameNumEltsAs<1, 2>,
                                                   SDTCisOpSmallerThanOp<2, 1>,
                                                   SDTCisSameAs<0, 3>,
                                                   SDTCisSameNumEltsAs<1, 4>,
                                                   SDTCVecEltisVT<4, i1>,
                                                   SDTCisVT<5, XLenVT>]>;
def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>;
def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>;

def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>,
                                                  SDTCisSameAs<0, 1>,
                                                  SDTCisFP<2>,
                                                  SDTCisSameNumEltsAs<1, 2>,
                                                  SDTCisOpSmallerThanOp<2, 1>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisSameNumEltsAs<1, 4>,
                                                  SDTCVecEltisVT<4, i1>,
                                                  SDTCisVT<5, XLenVT>]>;

def riscv_vfwadd_w_vl : SDNode<"RISCVISD::VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>;
def riscv_vfwsub_w_vl : SDNode<"RISCVISD::VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>;

def SDTRVVVecReduce : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;

// "_oneuse" PatFrags match only when the node has a single use; used to decide
// when folding into a fused/widening form is profitable.
def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_add_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_sub_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                   node:$E),
                                  (riscv_mul_vl node:$A, node:$B, node:$C,
                                   node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                     node:$E),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                     node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                      node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
                                       node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_sext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                   (riscv_zext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_ext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                  (riscv_ext_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                       (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
  return N->hasOneUse();
}]>;

def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vfmadd_vl node:$A, node:$B,
                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vfnmadd_vl node:$A, node:$B,
                                       node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                      node:$E),
                                     (riscv_vfmsub_vl node:$A, node:$B,
                                      node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                       node:$E),
                                      (riscv_vfnmsub_vl node:$A, node:$B,
                                       node:$C, node:$D, node:$E), [{
  return N->hasOneUse();
}]>;

// Generate one VECREDUCE_*_VL node per reduction kind, all sharing the
// SDTRVVVecReduce profile.
foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;

// Give explicit Complexity to prefer simm5/uimm5.
def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<5>", [], [], 3>;
def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimmBits<6>", [], [], 3>;
def SplatPat_simm5_plus1
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
def SplatPat_simm5_plus1_nonzero
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;

// Selects extends or truncates of splats where we only care about the lowest 8
// bits of each element.
def Low8BitsSplatPat
    : ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>;

// Ignore the vl operand on vmv_v_f, and vmv_s_f.
// Matches an FP splat (full-vector or scalar-insert form) regardless of
// passthru and vl, yielding just the scalar operand.
def SplatFPOp : PatFrags<(ops node:$op),
                         [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue),
                          (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>;

// simm5 selection checked against a specific SEW.
def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>;
def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;

// Selects a masked vector-vector binary VL op into the corresponding _MASK
// pseudo (optionally SEW-aware in the pseudo name), with tail-agnostic policy.
class VPatBinaryVL_V<SDPatternOperator vop,
                     string instruction_name,
                     string suffix,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int log2sew,
                     LMULInfo vlmul,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     VReg op2_reg_class,
                     bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$passthru),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
               !if(isSEWAware,
                   instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                   instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$passthru,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// As VPatBinaryVL_V, but for pseudos that take a rounding-mode operand; passes
// FRM_DYN (no rounding-mode change).
class VPatBinaryVL_V_RM<SDPatternOperator vop,
                        string instruction_name,
                        string suffix,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int log2sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        VReg op1_reg_class,
                        VReg op2_reg_class,
                        bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (op1_type op1_reg_class:$rs1),
                        (op2_type op2_reg_class:$rs2),
                        (result_type result_reg_class:$passthru),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
               !if(isSEWAware,
                   instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                   instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
               result_reg_class:$passthru,
               op1_reg_class:$rs1,
               op2_reg_class:$rs2,
               (mask_type V0),
               // Value to indicate no rounding mode change in
               // RISCVInsertReadWriteCSR
               FRM_DYN,
               GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// Selects an unmasked (true_mask, undef passthru) op whose first source is
// tied to the destination into the _TIED pseudo. The second pattern matches a
// surrounding vmerge that reuses $rs1 as false/passthru to get the
// tail-undisturbed (TU_MU) form.
multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                                    string instruction_name,
                                    string suffix,
                                    ValueType result_type,
                                    ValueType op2_type,
                                    int sew,
                                    LMULInfo vlmul,
                                    VReg result_reg_class,
                                    VReg op2_reg_class> {
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
             result_reg_class:$rs1,
             op2_reg_class:$rs2,
             GPR:$vl, sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
             (result_type (vop
                           result_reg_class:$rs1,
                           (op2_type op2_reg_class:$rs2),
                           srcvalue,
                           true_mask,
                           VLOpFrag)),
             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
             result_reg_class:$rs1,
             op2_reg_class:$rs2,
             GPR:$vl, sew, TU_MU)>;
}

// Masked tied form: $rs1 is both the first source and the passthru, selecting
// the _MASK_TIED pseudo with tail-undisturbed policy.
class VPatTiedBinaryMaskVL_V<SDNode vop,
                             string instruction_name,
                             string suffix,
                             ValueType result_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg result_reg_class,
                             VReg op2_reg_class> :
    Pat<(result_type (vop
                      (result_type result_reg_class:$rs1),
                      (op2_type op2_reg_class:$rs2),
                      (result_type result_reg_class:$rs1),
                      (mask_type V0),
                      VLOpFrag)),
        (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK_TIED")
         result_reg_class:$rs1,
         op2_reg_class:$rs2,
         (mask_type V0), GPR:$vl, sew, TU_MU)>;

// Rounding-mode variant of VPatTiedBinaryNoMaskVL_V: same two patterns, but
// the selected _TIED pseudo takes FRM_DYN and the name may be SEW-aware.
multiclass VPatTiedBinaryNoMaskVL_V_RM<SDNode vop,
                                       string instruction_name,
                                       string suffix,
                                       ValueType result_type,
                                       ValueType op2_type,
                                       int log2sew,
                                       LMULInfo vlmul,
                                       VReg result_reg_class,
                                       VReg op2_reg_class,
                                       bit isSEWAware = 0> {
  defvar name = !if(isSEWAware,
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TIED",
                    instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED");
  def : Pat<(result_type (vop
                          (result_type result_reg_class:$rs1),
                          (op2_type op2_reg_class:$rs2),
                          srcvalue,
                          true_mask,
                          VLOpFrag)),
            (!cast<Instruction>(name)
             result_reg_class:$rs1,
             op2_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
  // Tail undisturbed
  def : Pat<(riscv_vmerge_vl true_mask,
             (result_type (vop
                           result_reg_class:$rs1,
                           (op2_type op2_reg_class:$rs2),
                           srcvalue,
                           true_mask,
                           VLOpFrag)),
             result_reg_class:$rs1, result_reg_class:$rs1, VLOpFrag),
            (!cast<Instruction>(name)
             result_reg_class:$rs1,
             op2_reg_class:$rs2,
             // Value to indicate no rounding mode change in
             // RISCVInsertReadWriteCSR
             FRM_DYN,
             GPR:$vl, log2sew, TU_MU)>;
}

// Vector-scalar/immediate form: op 2 is a splat matched by SplatPatKind and
// selected as an xop_kind (GPR or immediate) operand of the _MASK pseudo.
class VPatBinaryVL_XI<SDPatternOperator vop,
                      string instruction_name,
                      string suffix,
                      ValueType result_type,
                      ValueType vop1_type,
                      ValueType vop2_type,
                      ValueType mask_type,
                      int log2sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      VReg vop_reg_class,
                      ComplexPattern SplatPatKind,
                      DAGOperand xop_kind,
                      bit isSEWAware = 0>
    : Pat<(result_type (vop
                        (vop1_type vop_reg_class:$rs1),
                        (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
                        (result_type result_reg_class:$passthru),
                        (mask_type V0),
                        VLOpFrag)),
          (!cast<Instruction>(
               !if(isSEWAware,
                   instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
                   instruction_name#_#suffix#_#vlmul.MX#"_MASK"))
               result_reg_class:$passthru,
               vop_reg_class:$rs1,
               xop_kind:$rs2,
               (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;

// Instantiates the VV and VX patterns for each vector type in vtilist.
multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
                              list<VTypeInfo> vtilist = AllIntegerVectors,
                              bit isSEWAware = 0> {
  foreach vti = vtilist in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                           vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                            SplatPat, GPR, isSEWAware>;
    }
  }
}

// VV + VX plus a VI pattern using the given immediate splat kind/operand.
multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                 Operand ImmType = simm5>
    : VPatBinaryVL_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : VPatBinaryVL_XI<vop, instruction_name, "VI",
                          vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
                          !cast<ComplexPattern>(SplatPat#_#ImmType),
                          ImmType>;
  }
}

// Widening form: narrow sources, wide result; predicated on both the narrow
// and wide types being legal.
multiclass VPatBinaryWVL_VV_VX<SDPatternOperator vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
                            wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                            SplatPat, GPR>;
    }
  }
}

multiclass VPatBinaryWVL_VV_VX_WV_WX<SDPatternOperator vop, SDNode vop_w,
                                     string instruction_name>
    : VPatBinaryWVL_VV_VX<vop, instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti
= VtiToWti.Vti; 855 defvar wti = VtiToWti.Wti; 856 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 857 GetVTypePredicates<wti>.Predicates) in { 858 defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV", 859 wti.Vector, vti.Vector, vti.Log2SEW, 860 vti.LMul, wti.RegClass, vti.RegClass>; 861 def : VPatTiedBinaryMaskVL_V<vop_w, instruction_name, "WV", 862 wti.Vector, vti.Vector, wti.Mask, 863 vti.Log2SEW, vti.LMul, wti.RegClass, 864 vti.RegClass>; 865 def : VPatBinaryVL_V<vop_w, instruction_name, "WV", 866 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 867 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 868 vti.RegClass>; 869 def : VPatBinaryVL_XI<vop_w, instruction_name, "WX", 870 wti.Vector, wti.Vector, vti.Vector, vti.Mask, 871 vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass, 872 SplatPat, GPR>; 873 } 874 } 875} 876 877class VPatBinaryVL_VF<SDPatternOperator vop, 878 string instruction_name, 879 ValueType result_type, 880 ValueType vop1_type, 881 ValueType vop2_type, 882 ValueType mask_type, 883 int log2sew, 884 LMULInfo vlmul, 885 VReg result_reg_class, 886 VReg vop_reg_class, 887 RegisterClass scalar_reg_class, 888 bit isSEWAware = 0> 889 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 890 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 891 (result_type result_reg_class:$passthru), 892 (mask_type V0), 893 VLOpFrag)), 894 (!cast<Instruction>( 895 !if(isSEWAware, 896 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 897 instruction_name#"_"#vlmul.MX#"_MASK")) 898 result_reg_class:$passthru, 899 vop_reg_class:$rs1, 900 scalar_reg_class:$rs2, 901 (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 902 903class VPatBinaryVL_VF_RM<SDPatternOperator vop, 904 string instruction_name, 905 ValueType result_type, 906 ValueType vop1_type, 907 ValueType vop2_type, 908 ValueType mask_type, 909 int log2sew, 910 LMULInfo vlmul, 911 VReg result_reg_class, 912 VReg vop_reg_class, 913 RegisterClass scalar_reg_class, 914 bit isSEWAware = 
0> 915 : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), 916 (vop2_type (SplatFPOp scalar_reg_class:$rs2)), 917 (result_type result_reg_class:$passthru), 918 (mask_type V0), 919 VLOpFrag)), 920 (!cast<Instruction>( 921 !if(isSEWAware, 922 instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", 923 instruction_name#"_"#vlmul.MX#"_MASK")) 924 result_reg_class:$passthru, 925 vop_reg_class:$rs1, 926 scalar_reg_class:$rs2, 927 (mask_type V0), 928 // Value to indicate no rounding mode change in 929 // RISCVInsertReadWriteCSR 930 FRM_DYN, 931 GPR:$vl, log2sew, TAIL_AGNOSTIC)>; 932 933multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name, 934 bit isSEWAware = 0> { 935 foreach vti = AllFloatVectors in { 936 let Predicates = GetVTypePredicates<vti>.Predicates in { 937 def : VPatBinaryVL_V<vop, instruction_name, "VV", 938 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 939 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 940 vti.RegClass, isSEWAware>; 941 def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix, 942 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 943 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 944 vti.ScalarRegClass, isSEWAware>; 945 } 946 } 947} 948 949multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name, 950 bit isSEWAware = 0> { 951 foreach vti = AllFloatVectors in { 952 let Predicates = GetVTypePredicates<vti>.Predicates in { 953 def : VPatBinaryVL_V_RM<vop, instruction_name, "VV", 954 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 955 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 956 vti.RegClass, isSEWAware>; 957 def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix, 958 vti.Vector, vti.Vector, vti.Vector, vti.Mask, 959 vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, 960 vti.ScalarRegClass, isSEWAware>; 961 } 962 } 963} 964 965multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name, 966 bit isSEWAware = 0> { 967 foreach fvti = 
AllFloatVectors in { 968 let Predicates = GetVTypePredicates<fvti>.Predicates in 969 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 970 fvti.RegClass:$rs1, 971 (fvti.Vector fvti.RegClass:$passthru), 972 (fvti.Mask V0), 973 VLOpFrag)), 974 (!cast<Instruction>( 975 !if(isSEWAware, 976 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 977 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 978 fvti.RegClass:$passthru, 979 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 980 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 981 } 982} 983 984multiclass VPatBinaryFPVL_R_VF_RM<SDPatternOperator vop, string instruction_name, 985 bit isSEWAware = 0> { 986 foreach fvti = AllFloatVectors in { 987 let Predicates = GetVTypePredicates<fvti>.Predicates in 988 def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 989 fvti.RegClass:$rs1, 990 (fvti.Vector fvti.RegClass:$passthru), 991 (fvti.Mask V0), 992 VLOpFrag)), 993 (!cast<Instruction>( 994 !if(isSEWAware, 995 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", 996 instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) 997 fvti.RegClass:$passthru, 998 fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, 999 (fvti.Mask V0), 1000 // Value to indicate no rounding mode change in 1001 // RISCVInsertReadWriteCSR 1002 FRM_DYN, 1003 GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; 1004 } 1005} 1006 1007multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name, 1008 CondCode cc> { 1009 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1010 vti.RegClass:$rs2, cc, 1011 VR:$passthru, 1012 (vti.Mask V0), 1013 VLOpFrag)), 1014 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1015 VR:$passthru, 1016 vti.RegClass:$rs1, 1017 vti.RegClass:$rs2, 1018 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1019} 1020 1021// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 
1022multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name, 1023 CondCode cc, CondCode invcc> 1024 : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> { 1025 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), 1026 vti.RegClass:$rs1, invcc, 1027 VR:$passthru, 1028 (vti.Mask V0), 1029 VLOpFrag)), 1030 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") 1031 VR:$passthru, vti.RegClass:$rs1, 1032 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1033} 1034 1035multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name, 1036 CondCode cc, CondCode invcc> { 1037 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); 1038 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1039 (SplatPat (XLenVT GPR:$rs2)), cc, 1040 VR:$passthru, 1041 (vti.Mask V0), 1042 VLOpFrag)), 1043 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1044 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1045 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), 1046 (vti.Vector vti.RegClass:$rs1), invcc, 1047 VR:$passthru, 1048 (vti.Mask V0), 1049 VLOpFrag)), 1050 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1051 GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>; 1052} 1053 1054multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name, 1055 CondCode cc, CondCode invcc> { 1056 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1057 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1058 (SplatPat_simm5 simm5:$rs2), cc, 1059 VR:$passthru, 1060 (vti.Mask V0), 1061 VLOpFrag)), 1062 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1063 XLenVT:$rs2, (vti.Mask V0), GPR:$vl, 1064 vti.Log2SEW)>; 1065 1066 // FIXME: Can do some canonicalization to remove these patterns. 
1067 def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2), 1068 (vti.Vector vti.RegClass:$rs1), invcc, 1069 VR:$passthru, 1070 (vti.Mask V0), 1071 VLOpFrag)), 1072 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1073 simm5:$rs2, (vti.Mask V0), GPR:$vl, 1074 vti.Log2SEW)>; 1075} 1076 1077multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti, 1078 string instruction_name, 1079 CondCode cc, CondCode invcc, 1080 ComplexPattern splatpat_kind> { 1081 defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); 1082 def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), 1083 (splatpat_kind simm5:$rs2), cc, 1084 VR:$passthru, 1085 (vti.Mask V0), 1086 VLOpFrag)), 1087 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1088 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1089 vti.Log2SEW)>; 1090 1091 // FIXME: Can do some canonicalization to remove these patterns. 1092 def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), 1093 (vti.Vector vti.RegClass:$rs1), invcc, 1094 VR:$passthru, 1095 (vti.Mask V0), 1096 VLOpFrag)), 1097 (instruction_masked VR:$passthru, vti.RegClass:$rs1, 1098 (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl, 1099 vti.Log2SEW)>; 1100} 1101 1102multiclass VPatFPSetCCVL_VV_VF_FV<SDPatternOperator vop, CondCode cc, 1103 string inst_name, 1104 string swapped_op_inst_name> { 1105 foreach fvti = AllFloatVectors in { 1106 let Predicates = GetVTypePredicates<fvti>.Predicates in { 1107 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1108 fvti.RegClass:$rs2, 1109 cc, 1110 VR:$passthru, 1111 (fvti.Mask V0), 1112 VLOpFrag)), 1113 (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") 1114 VR:$passthru, fvti.RegClass:$rs1, 1115 fvti.RegClass:$rs2, (fvti.Mask V0), 1116 GPR:$vl, fvti.Log2SEW)>; 1117 def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), 1118 (SplatFPOp fvti.ScalarRegClass:$rs2), 1119 cc, 1120 VR:$passthru, 1121 (fvti.Mask V0), 1122 VLOpFrag)), 1123 
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1124 VR:$passthru, fvti.RegClass:$rs1, 1125 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1126 GPR:$vl, fvti.Log2SEW)>; 1127 def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), 1128 (fvti.Vector fvti.RegClass:$rs1), 1129 cc, 1130 VR:$passthru, 1131 (fvti.Mask V0), 1132 VLOpFrag)), 1133 (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") 1134 VR:$passthru, fvti.RegClass:$rs1, 1135 fvti.ScalarRegClass:$rs2, (fvti.Mask V0), 1136 GPR:$vl, fvti.Log2SEW)>; 1137 } 1138 } 1139} 1140 1141multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix, 1142 list <VTypeInfoToFraction> fraction_list> { 1143 foreach vtiTofti = fraction_list in { 1144 defvar vti = vtiTofti.Vti; 1145 defvar fti = vtiTofti.Fti; 1146 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1147 GetVTypePredicates<fti>.Predicates) in 1148 def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), 1149 (fti.Mask V0), VLOpFrag)), 1150 (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") 1151 (vti.Vector (IMPLICIT_DEF)), 1152 fti.RegClass:$rs2, 1153 (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1154 } 1155} 1156 1157// Single width converting 1158 1159multiclass VPatConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1160 foreach fvti = AllFloatVectors in { 1161 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1162 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1163 GetVTypePredicates<ivti>.Predicates) in 1164 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1165 (fvti.Mask V0), 1166 VLOpFrag)), 1167 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1168 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1169 (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>; 1170 } 1171} 1172 1173 1174multiclass VPatConvertFP2I_RM_VL_V<SDPatternOperator vop, string instruction_name> { 1175 foreach fvti = AllFloatVectors in 
{ 1176 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1177 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1178 GetVTypePredicates<ivti>.Predicates) in 1179 def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1180 (fvti.Mask V0), (XLenVT timm:$frm), 1181 VLOpFrag)), 1182 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK") 1183 (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1184 (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, 1185 TA_MA)>; 1186 } 1187} 1188 1189multiclass VPatConvertI2FPVL_V_RM<SDPatternOperator vop, string instruction_name> { 1190 foreach fvti = AllFloatVectors in { 1191 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1192 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1193 GetVTypePredicates<ivti>.Predicates) in 1194 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1195 (ivti.Mask V0), 1196 VLOpFrag)), 1197 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1198 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1199 (ivti.Mask V0), 1200 // Value to indicate no rounding mode change in 1201 // RISCVInsertReadWriteCSR 1202 FRM_DYN, 1203 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1204 } 1205} 1206 1207multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> { 1208 foreach fvti = AllFloatVectors in { 1209 defvar ivti = GetIntVTypeInfo<fvti>.Vti; 1210 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1211 GetVTypePredicates<ivti>.Predicates) in 1212 def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1213 (ivti.Mask V0), (XLenVT timm:$frm), 1214 VLOpFrag)), 1215 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1216 (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1217 (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1218 } 1219} 1220 1221// Widening converting 1222 1223multiclass VPatWConvertFP2IVL_V<SDPatternOperator vop, string instruction_name> { 1224 foreach fvtiToFWti = 
AllWidenableFloatVectors in { 1225 defvar fvti = fvtiToFWti.Vti; 1226 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1227 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1228 GetVTypePredicates<iwti>.Predicates) in 1229 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1230 (fvti.Mask V0), 1231 VLOpFrag)), 1232 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1233 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1234 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>; 1235 } 1236} 1237 1238 1239multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> { 1240 foreach fvtiToFWti = AllWidenableFloatVectors in { 1241 defvar fvti = fvtiToFWti.Vti; 1242 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1243 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1244 GetVTypePredicates<iwti>.Predicates) in 1245 def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), 1246 (fvti.Mask V0), (XLenVT timm:$frm), 1247 VLOpFrag)), 1248 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK") 1249 (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, 1250 (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1251 } 1252} 1253 1254multiclass VPatWConvertI2FPVL_V<SDPatternOperator vop, 1255 string instruction_name> { 1256 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1257 defvar ivti = vtiToWti.Vti; 1258 defvar fwti = vtiToWti.Wti; 1259 let Predicates = !listconcat(GetVTypePredicates<ivti>.Predicates, 1260 GetVTypePredicates<fwti>.Predicates) in 1261 def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), 1262 (ivti.Mask V0), 1263 VLOpFrag)), 1264 (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK") 1265 (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, 1266 (ivti.Mask V0), 1267 GPR:$vl, ivti.Log2SEW, TA_MA)>; 1268 } 1269} 1270 1271// Narrowing converting 1272 1273multiclass VPatNConvertFP2IVL_W<SDPatternOperator vop, 1274 string instruction_name> { 1275 // 
Reuse the same list of types used in the widening nodes, but just swap the 1276 // direction of types around so we're converting from Wti -> Vti 1277 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1278 defvar vti = vtiToWti.Vti; 1279 defvar fwti = vtiToWti.Wti; 1280 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1281 GetVTypePredicates<fwti>.Predicates) in 1282 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1283 (fwti.Mask V0), 1284 VLOpFrag)), 1285 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1286 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1287 (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; 1288 } 1289} 1290 1291multiclass VPatNConvertFP2I_RM_VL_W<SDNode vop, string instruction_name> { 1292 foreach vtiToWti = AllWidenableIntToFloatVectors in { 1293 defvar vti = vtiToWti.Vti; 1294 defvar fwti = vtiToWti.Wti; 1295 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1296 GetVTypePredicates<fwti>.Predicates) in 1297 def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), 1298 (fwti.Mask V0), (XLenVT timm:$frm), 1299 VLOpFrag)), 1300 (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK") 1301 (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, 1302 (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; 1303 } 1304} 1305 1306multiclass VPatNConvertI2FPVL_W_RM<SDPatternOperator vop, 1307 string instruction_name> { 1308 foreach fvtiToFWti = AllWidenableFloatVectors in { 1309 defvar fvti = fvtiToFWti.Vti; 1310 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1311 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1312 GetVTypePredicates<iwti>.Predicates) in 1313 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1314 (iwti.Mask V0), 1315 VLOpFrag)), 1316 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1317 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1318 (iwti.Mask V0), 1319 // Value to indicate no rounding mode change in 
1320 // RISCVInsertReadWriteCSR 1321 FRM_DYN, 1322 GPR:$vl, fvti.Log2SEW, TA_MA)>; 1323 } 1324} 1325 1326multiclass VPatNConvertI2FP_RM_VL_W<SDNode vop, string instruction_name> { 1327 foreach fvtiToFWti = AllWidenableFloatVectors in { 1328 defvar fvti = fvtiToFWti.Vti; 1329 defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti; 1330 let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates, 1331 GetVTypePredicates<iwti>.Predicates) in 1332 def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), 1333 (iwti.Mask V0), (XLenVT timm:$frm), 1334 VLOpFrag)), 1335 (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") 1336 (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, 1337 (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; 1338 } 1339} 1340 1341multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> { 1342 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1343 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1344 let Predicates = GetVTypePredicates<vti>.Predicates in { 1345 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru), 1346 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1347 (vti.Mask V0), VLOpFrag, 1348 (XLenVT timm:$policy))), 1349 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1350 (vti_m1.Vector VR:$passthru), 1351 (vti.Vector vti.RegClass:$rs1), 1352 (vti_m1.Vector VR:$rs2), 1353 (vti.Mask V0), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1354 } 1355 } 1356} 1357 1358multiclass VPatReductionVL_RM<SDNode vop, string instruction_name, bit is_float> { 1359 foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { 1360 defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1"); 1361 let Predicates = GetVTypePredicates<vti>.Predicates in { 1362 def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru), 1363 (vti.Vector vti.RegClass:$rs1), VR:$rs2, 1364 (vti.Mask V0), VLOpFrag, 1365 (XLenVT 
timm:$policy))), 1366 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1367 (vti_m1.Vector VR:$passthru), 1368 (vti.Vector vti.RegClass:$rs1), 1369 (vti_m1.Vector VR:$rs2), 1370 (vti.Mask V0), 1371 // Value to indicate no rounding mode change in 1372 // RISCVInsertReadWriteCSR 1373 FRM_DYN, 1374 GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; 1375 } 1376 } 1377} 1378 1379multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> { 1380 foreach vtiToWti = AllWidenableIntVectors in { 1381 defvar vti = vtiToWti.Vti; 1382 defvar wti = vtiToWti.Wti; 1383 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1384 GetVTypePredicates<wti>.Predicates) in { 1385 def : Pat< 1386 (vti.Vector 1387 (riscv_trunc_vector_vl 1388 (op (wti.Vector wti.RegClass:$rs2), 1389 (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), 1390 (vti.Mask true_mask), 1391 VLOpFrag)), 1392 (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX) 1393 (vti.Vector (IMPLICIT_DEF)), 1394 wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1395 1396 def : Pat< 1397 (vti.Vector 1398 (riscv_trunc_vector_vl 1399 (op (wti.Vector wti.RegClass:$rs2), 1400 (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), 1401 (vti.Mask true_mask), 1402 VLOpFrag)), 1403 (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX) 1404 (vti.Vector (IMPLICIT_DEF)), 1405 wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1406 1407 def : Pat< 1408 (vti.Vector 1409 (riscv_trunc_vector_vl 1410 (op (wti.Vector wti.RegClass:$rs2), 1411 (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), 1412 VLOpFrag)), 1413 (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX) 1414 (vti.Vector (IMPLICIT_DEF)), 1415 wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; 1416 } 1417 } 1418} 1419 1420multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1421 foreach vtiToWti = !if(is_float, 
AllWidenableFloatVectors, AllWidenableIntVectors) in { 1422 defvar vti = vtiToWti.Vti; 1423 defvar wti = vtiToWti.Wti; 1424 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1425 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1426 GetVTypePredicates<wti>.Predicates) in { 1427 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), 1428 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1429 VR:$rs2, (vti.Mask V0), VLOpFrag, 1430 (XLenVT timm:$policy))), 1431 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1432 (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), 1433 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1434 (XLenVT timm:$policy))>; 1435 } 1436 } 1437} 1438 1439multiclass VPatWidenReductionVL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1440 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1441 defvar vti = vtiToWti.Vti; 1442 defvar wti = vtiToWti.Wti; 1443 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1444 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1445 GetVTypePredicates<wti>.Predicates) in { 1446 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), 1447 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), 1448 VR:$rs2, (vti.Mask V0), VLOpFrag, 1449 (XLenVT timm:$policy))), 1450 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1451 (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), 1452 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1453 // Value to indicate no rounding mode change in 1454 // RISCVInsertReadWriteCSR 1455 FRM_DYN, 1456 GPR:$vl, vti.Log2SEW, 1457 (XLenVT timm:$policy))>; 1458 } 1459 } 1460} 1461 1462multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1463 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, 
AllWidenableIntVectors) in { 1464 defvar vti = vtiToWti.Vti; 1465 defvar wti = vtiToWti.Wti; 1466 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1467 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1468 GetVTypePredicates<wti>.Predicates) in { 1469 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), 1470 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1471 VR:$rs2, (vti.Mask V0), VLOpFrag, 1472 (XLenVT timm:$policy))), 1473 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1474 (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), 1475 (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW, 1476 (XLenVT timm:$policy))>; 1477 } 1478 } 1479} 1480 1481multiclass VPatWidenReductionVL_Ext_VL_RM<SDNode vop, PatFrags extop, string instruction_name, bit is_float> { 1482 foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { 1483 defvar vti = vtiToWti.Vti; 1484 defvar wti = vtiToWti.Wti; 1485 defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1"); 1486 let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates, 1487 GetVTypePredicates<wti>.Predicates) in { 1488 def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), 1489 (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), 1490 VR:$rs2, (vti.Mask V0), VLOpFrag, 1491 (XLenVT timm:$policy))), 1492 (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") 1493 (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), 1494 (wti_m1.Vector VR:$rs2), (vti.Mask V0), 1495 // Value to indicate no rounding mode change in 1496 // RISCVInsertReadWriteCSR 1497 FRM_DYN, 1498 GPR:$vl, vti.Log2SEW, 1499 (XLenVT timm:$policy))>; 1500 } 1501 } 1502} 1503 1504multiclass VPatBinaryFPWVL_VV_VF<SDNode vop, string instruction_name> { 1505 foreach fvtiToFWti = AllWidenableFloatVectors in { 1506 
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V<vop, instruction_name, "VV",
                           wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                            wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                            vti.ScalarRegClass>;
    }
  }
}

// Widening FP binary op patterns (VV and VF forms) for _RM pseudos that carry
// a rounding-mode operand.
multiclass VPatBinaryFPWVL_VV_VF_RM<SDNode vop, string instruction_name,
                                    bit isSEWAware = 0> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    // Both the narrow (vti) and wide (wti) types must be legal.
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
                              wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                              vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
                               wti.Vector, vti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
                               vti.ScalarRegClass, isSEWAware>;
    }
  }
}

// Adds WV (wide x narrow vector) and WF (wide x narrow scalar) forms on top
// of the VV/VF forms inherited from VPatBinaryFPWVL_VV_VF.
multiclass VPatBinaryFPWVL_VV_VF_WV_WF<SDNode vop, SDNode vop_w, string instruction_name>
    : VPatBinaryFPWVL_VV_VF<vop, instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
                                      wti.Vector, vti.Vector, vti.Log2SEW,
                                      vti.LMul, wti.RegClass, vti.RegClass>;
      def : VPatBinaryVL_V<vop_w, instruction_name, "WV",
                           wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                           vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                           vti.RegClass>;
      def : VPatBinaryVL_VF<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                            wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                            vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                            vti.ScalarRegClass>;
    }
  }
}

// Same as above, but for _RM pseudos carrying a rounding-mode operand.
multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM<
    SDNode vop, SDNode vop_w, string instruction_name, bit isSEWAware = 0>
    : VPatBinaryFPWVL_VV_VF_RM<vop, instruction_name, isSEWAware> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar vti = fvtiToFWti.Vti;
    defvar wti = fvtiToFWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      defm : VPatTiedBinaryNoMaskVL_V_RM<vop_w, instruction_name, "WV",
                                         wti.Vector, vti.Vector, vti.Log2SEW,
                                         vti.LMul, wti.RegClass, vti.RegClass,
                                         isSEWAware>;
      def : VPatBinaryVL_V_RM<vop_w, instruction_name, "WV",
                              wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                              vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                              vti.RegClass, isSEWAware>;
      def : VPatBinaryVL_VF_RM<vop_w, instruction_name#"_W"#vti.ScalarSuffix,
                               wti.Vector, wti.Vector, vti.Vector, vti.Mask,
                               vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
                               vti.ScalarRegClass, isSEWAware>;
    }
  }
}

// Match (trunc (shift wide, (ext (splat GPR)))) with all-true masks and fold
// it into a single narrowing-shift _WX instruction.
multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))),
                                 (vti.Mask true_mask), VLOpFrag)),
              srcvalue, (wti.Mask true_mask), VLOpFrag),
          (vti.Mask true_mask), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
        (vti.Vector (IMPLICIT_DEF)),
        wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Match a narrowing right shift whose shift-amount vector is an extended
// narrow vector, folded into a masked narrowing-shift _WV instruction.
multiclass VPatNarrowShiftExtVL_WV<SDNode op, PatFrags extop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in
    def : Pat<
      (vti.Vector
        (riscv_trunc_vector_vl
          (op (wti.Vector wti.RegClass:$rs2),
              (wti.Vector (extop (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), VLOpFrag)),
              srcvalue, (vti.Mask true_mask), VLOpFrag),
          (vti.Mask V0), VLOpFrag)),
      (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
        (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1,
        (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// Instantiate the _WV narrowing-shift pattern for both sign- and
// zero-extended (single-use) shift amounts.
multiclass VPatNarrowShiftVL_WV<SDNode op, string instruction_name> {
  defm : VPatNarrowShiftExtVL_WV<op, riscv_sext_vl_oneuse, instruction_name>;
  defm : VPatNarrowShiftExtVL_WV<op, riscv_zext_vl_oneuse, instruction_name>;
}

// Integer multiply-add patterns (VV and VX) for an unmasked add/sub of a
// single-use multiply.
multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // NOTE: We choose VMADD because it has the most commuting freedom. So it
      // works best with how TwoAddressInstructionPass tries commuting.
      def : Pat<(vti.Vector
                 (op vti.RegClass:$rs2,
                     (riscv_mul_vl_oneuse vti.RegClass:$rs1,
                                          vti.RegClass:$rd,
                                          srcvalue, (vti.Mask true_mask), VLOpFrag),
                     srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
      // commutable.
      def : Pat<(vti.Vector
                 (op vti.RegClass:$rs2,
                     (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
                                          vti.RegClass:$rd,
                                          srcvalue, (vti.Mask true_mask), VLOpFrag),
                     srcvalue, (vti.Mask true_mask), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Integer multiply-accumulate selected via vmerge of a single-use multiply:
// the vmerge mask becomes the instruction mask; merging into $rd uses TU_MU,
// merging into undef uses TAIL_AGNOSTIC.
multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      // Tied merge (false operand and passthru are both $rd).
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      // Undef passthru: tail-agnostic.
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (op vti.RegClass:$rd,
                                 (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                                      srcvalue, (vti.Mask true_mask), VLOpFrag),
                                 srcvalue, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening integer multiply-add (vwmacc family), VV and VX forms.
multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
                           (vti.Vector vti.RegClass:$rs2),
                           (wti.Vector wti.RegClass:$rd),
                           (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>(instr_name#"_VX_"#vti.LMul.MX#"_MASK")
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1,
                 vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 TAIL_AGNOSTIC)>;
    }
  }
}

// Narrowing shift by a splatted GPR or uimm5 amount: fold the truncate of the
// wide shift into a single _WX/_WI narrowing-shift instruction.
multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                 (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                                 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vti.Vector (riscv_trunc_vector_vl
                 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                                 srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
                 (vti.Vector (IMPLICIT_DEF)),
                 wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// FP fused multiply-add patterns (VV and VF) for masked vfmadd-family nodes.
multiclass VPatFPMulAddVL_VV_VF<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// Same as VPatFPMulAddVL_VV_VF, but for _RM pseudos: an FRM_DYN operand is
// passed so RISCVInsertReadWriteCSR leaves the rounding mode unchanged, and
// SEW-aware pseudo names (_E<sew>) are used.
multiclass VPatFPMulAddVL_VV_VF_RM<SDPatternOperator vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                                 vti.RegClass:$rs2, (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TA_MA)>;

      def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                                 vti.RegClass:$rd, vti.RegClass:$rs2,
                                 (vti.Mask V0),
                                 VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// FP multiply-accumulate selected via vmerge of an unmasked FMA: the vmerge
// mask becomes the instruction mask; merging into $rd uses TU_MU, merging
// into undef uses TAIL_AGNOSTIC.
multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// _RM variant of the above; passes FRM_DYN and uses SEW-aware pseudo names.
multiclass VPatFPMulAccVL_VV_VF_RM<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, vti.RegClass:$rd, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TU_MU)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(riscv_vmerge_vl (vti.Mask V0),
                 (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                  vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                 vti.RegClass:$rd, undef, VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                 vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

// Widening FP multiply-accumulate (vfwmacc family), VV and VF forms.
multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX #"_MASK")
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX #"_MASK")
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// _RM variant of the widening FP multiply-accumulate patterns. Also used for
// the bf16 widening FMA, hence the overridable type list and the extra
// HasStdExtZvfbfwma predicate for bf16 elements.
multiclass VPatWidenFPMulAccVL_VV_VF_RM<SDNode vop, string instruction_name,
                                        list<VTypeInfoToWide> vtiToWtis =
                                          AllWidenableFloatVectors> {
  foreach vtiToWti = vtiToWtis in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<wti>.Predicates,
                                 !if(!eq(vti.Scalar, bf16),
                                     [HasStdExtZvfbfwma],
                                     [])) in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_VV_"#suffix#"_MASK")
                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TA_MA)>;
      def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                     (vti.Vector vti.RegClass:$rs2),
                     (wti.Vector wti.RegClass:$rd), (vti.Mask V0),
                     VLOpFrag),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix#"_MASK")
                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, vti.Log2SEW, TA_MA)>;
    }
  }
}

// vslideup/vslidedown with an immediate or GPR offset; the tail/mask policy
// is taken from the node's policy operand.
multiclass VPatSlideVL_VX_VI<SDNode vop, string instruction_name> {
  foreach vti = AllVectors in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    let Predicates = GetVTypePredicates<ivti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector vti.RegClass:$rs1),
                                 uimm5:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 (XLenVT timm:$policy))>;

      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0),
                                 VLOpFrag, (XLenVT timm:$policy))),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                 (XLenVT timm:$policy))>;
    }
  }
}

// vslide1up/vslide1down with a GPR element operand (integer vectors).
multiclass VPatSlide1VL_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 GPR:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

// vfslide1up/vfslide1down with an FP scalar element operand (float vectors).
multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3),
                                 (vti.Vector vti.RegClass:$rs1),
                                 vti.Scalar:$rs2, (vti.Mask V0), VLOpFrag)),
                (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2,
                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
    }
  }
}

// Averaging add patterns: select the avgfloor/avgceil nodes to vaadd(u) with
// the fixed vxrm rounding mode supplied by the instantiation.
multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
  foreach vti = AllIntegerVectors in {
    let Predicates = GetVTypePredicates<vti>.Predicates in {
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector vti.RegClass:$rs2),
                     vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2,
                 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
      def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
                     (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                     vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
                (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK")
                 vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2,
                 (vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    }
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

// 11. Vector Integer Arithmetic Instructions

// 11.1.
// Vector Single-Width Integer Add and Subtract (11.1.)
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
               vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                            (vti.Vector vti.RegClass:$rs1),
                            vti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
               vti.RegClass:$passthru, vti.RegClass:$rs1, simm5:$rs2,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.2. Vector Widening Integer Add/Subtract
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">;

// shl_vl (ext_vl v, splat 1) is a special case of widening add: select it as
// vwadd(u) of the value with itself.
foreach vtiToWti = AllWidenableIntVectors in {
  defvar vti = vtiToWti.Vti;
  defvar wti = vtiToWti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse
                                         (vti.Vector vti.RegClass:$rs1),
                                         (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                                         (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK")
               wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse
                                         (vti.Vector vti.RegClass:$rs1),
                                         (vti.Mask V0), VLOpFrag)),
                            (wti.Vector (riscv_vmv_v_x_vl
                                         (wti.Vector undef), 1, VLOpFrag)),
                            wti.RegClass:$passthru, (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK")
               wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 11.3. Vector Integer Extension
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8",
                      AllFractionableVF8IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8",
                      AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  let Predicates = GetVTypePredicates<vti>.Predicates in
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)),
                          srcvalue, (vti.Mask true_mask), VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
             (vti.Vector (IMPLICIT_DEF)),
             vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.7. Vector Narrowing Integer Right Shift Instructions
defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;

defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;

defm : VPatNarrowShiftVL_WV<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftVL_WV<riscv_sra_vl, "PseudoVNSRA">;

// A plain truncate to half SEW is selected as vnsrl by 0.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask V0),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
             (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
             (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.8.
// Vector Integer Comparison Instructions (11.8.)
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
    defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;

    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;

    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
    // There is no VMSGE(U)_VX instruction

    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
    defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;

    // Strict compares against an immediate become non-strict compares against
    // the immediate plus one.
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
                                                SplatPat_simm5_plus1_nonzero>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
                                                SplatPat_simm5_plus1>;
    defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                                SplatPat_simm5_plus1_nonzero>;
  }
} // foreach vti = AllIntegerVectors

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", IntegerVectorsExceptI64>;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", IntegerVectorsExceptI64>;
// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
let Predicates = [HasVInstructionsFullMultiply] in {
  defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH", I64IntegerVectors>;
  defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU", I64IntegerVectors>;
}

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmacc_vl, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccu_vl, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmaccsu_vl, "PseudoVWMACCSU">;
// vwmaccsu with the splat on the signed side selects to vwmaccus with the
// vector/scalar operands swapped.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in
  def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1),
                               (SplatPat XLenVT:$rs2),
                               (wti.Vector wti.RegClass:$rd),
                               (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK")
             wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
             (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$passthru,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
               vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$passthru,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
               vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;

    def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           vti.RegClass:$passthru,
                                           VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
               vti.RegClass:$passthru, vti.RegClass:$rs2, simm5:$rs1,
               (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  }
}

// 11.16.
// Vector Integer Move Instructions (11.16.)
// NOTE: The original text closed only one of the two scopes opened by the
// first foreach before starting the second one, nesting `foreach vti =
// AllIntegerVectors` inside `foreach vti = AllVectors` and redeclaring the
// iterator `vti`. The two loops are independent and must be siblings; the
// misplaced closing brace is restored here (total brace count is unchanged).
foreach vti = AllVectors in {
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    // Whole-register-style move with a passthru (vmv.v.v).
    def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru,
                                            vti.RegClass:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
               vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl,
               vti.Log2SEW, TU_MU)>;
  }
}

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // Scalar splat from a GPR (vmv.v.x).
    def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
               vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
    // Splat of a simm5 immediate (vmv.v.i); the ComplexPattern is selected by
    // SEW so the immediate is validated at the element width.
    defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
    def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5),
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
               vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>;
  }
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;

// 12.2. Vector Single-Width Averaging Add and Subtract
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgfloors_vl, 0b10>;
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10, suffix="U">;
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceils_vl, 0b00>;
defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00, suffix="U">;

// 12.5.
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
// Saturating truncation of a wide integer vector to the narrow element type
// via vnclip(u).wi with a shift amount of 0.
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                               GetVTypePredicates<wti>.Predicates) in {
    // Rounding mode here is arbitrary since we aren't shifting out any bits.
    def : Pat<(vti.Vector (riscv_trunc_vector_vl_ssat (wti.Vector wti.RegClass:$rs1),
                                                      (vti.Mask V0),
                                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVNCLIP_WI_"#vti.LMul.MX#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
                   (vti.Mask V0), /*RNU*/0, GPR:$vl, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (riscv_trunc_vector_vl_usat (wti.Vector wti.RegClass:$rs1),
                                                      (vti.Mask V0),
                                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
                   (vti.Mask V0), /*RNU*/0, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

// 13. Vector Floating-Point Instructions

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD", isSEWAware=1>;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fsub_vl, "PseudoVFSUB", isSEWAware=1>;
// Reversed-operand scalar form: scalar - vector lowers to vfrsub.
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fsub_vl, "PseudoVFRSUB", isSEWAware=1>;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwadd_vl, riscv_vfwadd_w_vl,
                                      "PseudoVFWADD", isSEWAware=1>;
defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM<riscv_vfwsub_vl, riscv_vfwsub_w_vl,
                                      "PseudoVFWSUB", isSEWAware=1>;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fmul_vl, "PseudoVFMUL", isSEWAware=1>;
defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fdiv_vl, "PseudoVFDIV", isSEWAware=1>;
// Reversed-operand scalar form: scalar / vector lowers to vfrdiv.
defm : VPatBinaryFPVL_R_VF_RM<any_riscv_fdiv_vl, "PseudoVFRDIV", isSEWAware=1>;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatBinaryFPWVL_VV_VF_RM<riscv_vfwmul_vl, "PseudoVFWMUL", isSEWAware=1>;

// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
// The *madd/*msub forms overwrite the multiplicand; the *macc/*sac forms
// (matched only for single-use nodes) overwrite the addend.
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmadd_vl, "PseudoVFMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfmsub_vl, "PseudoVFMSUB">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmadd_vl, "PseudoVFNMADD">;
defm : VPatFPMulAddVL_VV_VF_RM<any_riscv_vfnmsub_vl, "PseudoVFNMSUB">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
defm : VPatFPMulAccVL_VV_VF_RM<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmadd_vl, "PseudoVFWMACCBF16",
                                    AllWidenableBFloatToFloatVectors>;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmadd_vl, "PseudoVFWNMACC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwmsub_vl, "PseudoVFWMSAC">;
defm : VPatWidenFPMulAccVL_VV_VF_RM<riscv_vfwnmsub_vl, "PseudoVFWNMSAC">;

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_vfmin_vl, "PseudoVFMIN", isSEWAware=1>;
defm : VPatBinaryFPVL_VV_VF<riscv_vfmax_vl, "PseudoVFMAX", isSEWAware=1>;

// 13.13. Vector Floating-Point Compare Instructions
// 13.13. Vector Floating-Point Compare Instructions
// Each (condcode, unordered-variant) pair maps to the same pseudo; the second
// pseudo name is the operand-swapped form used when the scalar appears on the
// left of the comparison.
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETOEQ,
                              "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetcc_vl, SETUNE,
                              "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLT,
                              "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETLE,
                              "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<any_riscv_fsetccs_vl, SETOLE,
                              "PseudoVMFLE", "PseudoVMFGE">;

foreach vti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // 13.8. Vector Floating-Point Square-Root Instruction
    def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
              VLOpFrag),
              (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.12. Vector Floating-Point Sign-Injection Instructions
    // fabs is vfsgnjx with both operands equal (x XOR x clears the sign bit).
    def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
              VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;
    // Handle fneg with VFSGNJN using the same input for both operands.
    def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
              VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
                   vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (vti.Vector vti.RegClass:$rs2),
                                  vti.RegClass:$passthru,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs1,
                   vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // copysign(x, fneg(y)) with all-true masks folds to a single vfsgnjn.vv.
    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (riscv_fneg_vl vti.RegClass:$rs2,
                                                 (vti.Mask true_mask),
                                                 VLOpFrag),
                                  srcvalue,
                                  (vti.Mask true_mask),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX#"_E"#vti.SEW)
                   (vti.Vector (IMPLICIT_DEF)),
                   vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;

    def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                  (SplatFPOp vti.ScalarRegClass:$rs2),
                                  vti.RegClass:$passthru,
                                  (vti.Mask V0),
                                  VLOpFrag),
              (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_E"#vti.SEW#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs1,
                   vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;

    // Rounding without exception to implement nearbyint.
    def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                             (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

    // 13.14. Vector Floating-Point Classify Instruction
    def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                               (vti.Mask V0), VLOpFrag),
              (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
}

foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15. Vector Floating-Point Merge Instruction
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    // FP constants that fit a GPR can be merged with the integer vmerge.vxm.
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;

    // Merging in +0.0 uses vmerge.vim with immediate 0.
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
                   GPR:$vl, fvti.Log2SEW)>;
  }
}

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2,
                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
  }
}

foreach fvti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    // 13.16. Vector Floating-Point Move Instruction
    // If we're splatting fpimm0, use vmv.v.x vd, x0.
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                   $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
                   $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

// General case: splat an FP scalar register with vfmv.v.f.
foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                  fvti.LMul.MX)
                   $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
                   GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
// _RM_ variants carry an explicit static rounding mode; _RTZ_ variants use
// the round-toward-zero instruction forms; the plain I2FP variants use the
// dynamic rounding mode.
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_XU_F_V">;
defm : VPatConvertFP2I_RM_VL_V<any_riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_X_F_V">;

defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;

defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;

defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_F_XU_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_F_X_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_X_F_V">;

defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;

defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;

// fpextend to the wider FP type with vfwcvt.f.f.v. For f16 sources only the
// Zvfhmin-style minimal predicate is required.
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                          (fvti.Vector fvti.RegClass:$rs1),
                          (fvti.Mask V0),
                          VLOpFrag)),
            (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                 (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// BF16 -> f32 extension uses the dedicated vfwcvtbf16 pseudo.
foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = [HasVInstructionsBF16Minimal] in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                          (fvti.Vector fvti.RegClass:$rs1),
                          (fvti.Mask V0),
                          VLOpFrag)),
            (!cast<Instruction>("PseudoVFWCVTBF16_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                 (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                 (fvti.Mask V0),
                 GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_X_F_W">;

defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;

defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;

defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_F_X_W">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
                       !listconcat(GetVTypePredicates<fvti>.Predicates,
                                   GetVTypePredicates<fwti>.Predicates)) in {
    def : Pat<(fvti.Vector (any_riscv_fpround_vl
                            (fwti.Vector fwti.RegClass:$rs1),
                            (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0),
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
                   GPR:$vl, fvti.Log2SEW, TA_MA)>;

    // Round-to-odd narrowing requires the full predicates (inner `let`
    // overrides the minimal-f16 predicate chosen above).
    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
    def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                            (fwti.Vector fwti.RegClass:$rs1),
                            (fwti.Mask V0), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                   (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                   (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// f32 -> BF16 rounding uses the dedicated vfncvtbf16 pseudo.
foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  let Predicates = [HasVInstructionsBF16Minimal] in
  def : Pat<(fvti.Vector (any_riscv_fpround_vl
                          (fwti.Vector fwti.RegClass:$rs1),
                          (fwti.Mask V0), VLOpFrag)),
            (!cast<Instruction>("PseudoVFNCVTBF16_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                 (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                 (fwti.Mask V0),
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
                 GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 14. Vector Reduction Operations

// 14.1. Vector Single-Width Integer Reduction Instructions
// 14.1. Vector Single-Width Integer Reduction Instructions
defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", is_float=0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", is_float=0>;

// 14.2. Vector Widening Integer Reduction Instructions
// Match reductions whose single-use source is an extend (either plain ISD
// ext or the VL-predicated form) onto the widening reduction sum pseudos.
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", is_float=0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", is_float=0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", is_float=0>;

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
// SEQ_FADD (strictly ordered) uses the ordered-sum pseudo; plain FADD may use
// the unordered sum.
defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", is_float=1>;
defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", is_float=1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", is_float=1>;

// 14.4. Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDOSUM", is_float=1>;
defm : VPatWidenReductionVL_RM<rvv_vecreduce_FADD_vl, fpext_oneuse,
                               "PseudoVFWREDUSUM", is_float=1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl,
                                      riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDUSUM", is_float=1>;

// 15. Vector Mask Instructions

foreach mti = AllMasks in {
  let Predicates = [HasVInstructions] in {
    // 15.1 Vector Mask-Register Logical Instructions
    def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
              (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMAND_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMOR_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXOR_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Fold a negated operand into the "andnot"/"ornot" mask instructions.
    def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                        (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMANDN_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                       (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVMORN_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    // XOR is associative so we need 2 patterns for VMXNOR: the NOT may appear
    // either on an operand or on the whole XOR.
    def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                        VLOpFrag),
                                        VR:$rs2, VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                       VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMNOR_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                        VLOpFrag),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.BX)
                   VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

    // Match the not idiom to the vmnot.m pseudo.
    def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
              (!cast<Instruction>("PseudoVMNAND_MM_" # mti.BX)
                   VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

    // 15.2 Vector count population in mask vcpop.m
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                      VLOpFrag)),
              (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

    // 15.3 vfirst find-first-set mask bit
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
                   VR:$rs2, GPR:$vl, mti.Log2SEW)>;
    def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                       VLOpFrag)),
              (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
                   VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
  }
}

// 16. Vector Permutation Instructions
// 16. Vector Permutation Instructions

// 16.1. Integer Scalar Move Instructions
foreach vti = NoGroupIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$passthru),
                                            vti.ScalarRegClass:$rs1,
                                            VLOpFrag)),
              (PseudoVMV_S_X $passthru, vti.ScalarRegClass:$rs1, GPR:$vl,
                             vti.Log2SEW)>;
  }
}

// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                                vti.RegClass:$rs1,
                                                vti.RegClass:$passthru,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$passthru,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                                uimm5:$imm,
                                                vti.RegClass:$passthru,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // vrgatherei16 takes a 16-bit index vector, so the index register group is
  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  // Only emit the pattern when the computed EMUL is a legal register group
  // (1/8 .. 8, expressed here as octuple values 1 .. 64).
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = GetVTypePredicates<vti>.Predicates in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$passthru,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

// 16.2. Floating-Point Scalar Move Instructions
foreach vti = NoGroupFloatVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    // +0.0 can be moved in from x0 with the integer scalar-move pseudo.
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
                                             (vti.Scalar (fpimm0)),
                                             VLOpFrag)),
              (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
    // FP constants representable as an XLen integer likewise use vmv.s.x.
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
                                             (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))),
                                             VLOpFrag)),
              (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix)
                   vti.RegClass:$passthru,
                   (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
  }
}

// FP/BF16 gathers share the integer gather pseudos (predicated via the
// equivalent integer type).
foreach vti = !listconcat(AllFloatVectors, AllBFloatVectors) in {
  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(vti.Vector
               (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                     (ivti.Vector vti.RegClass:$rs1),
                                     vti.RegClass:$passthru,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                                vti.RegClass:$passthru,
                                                (vti.Mask V0),
                                                VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector
               (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                     uimm5:$imm,
                                     vti.RegClass:$passthru,
                                     (vti.Mask V0),
                                     VLOpFrag)),
              (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }

  // Same EMUL computation as the integer gathers above.
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                 GetVTypePredicates<ivti>.Predicates) in
    def : Pat<(vti.Vector
               (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                         (ivti.Vector ivti.RegClass:$rs1),
                                         vti.RegClass:$passthru,
                                         (vti.Mask V0),
                                         VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                   vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                   (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

// vid.v: write the element index into each element. Inputs: mask, vl.
def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

// Slide: (passthru, src, offset, mask, vl, policy).
def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
// Slide1 (integer): the inserted scalar is an XLenVT GPR.
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
// Slide1 (FP): the inserted scalar has the vector's element type.
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl     : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl    : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl   : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl  : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;
def riscv_fslide1up_vl   : SDNode<"RISCVISD::VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
def riscv_fslide1down_vl : SDNode<"RISCVISD::VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask V0),
                                        VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                   (vti.Vector (IMPLICIT_DEF)), (vti.Mask V0), GPR:$vl, vti.Log2SEW,
                   TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;