xref: /llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (revision e376f9cb77717146290504da58740c97d9dc7eae)
1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 1.0.
11///
12/// This file is included from RISCVInstrInfoV.td
13///
14/// Overview of our vector instruction pseudos.  Many of the instructions
15/// have behavior which depends on the value of VTYPE.  Several core aspects of
16/// the compiler - e.g. register allocation - depend on fields in this
17/// configuration register.  The details of which fields matter differ by the
18/// specific instruction, but the common dimensions are:
19///
20/// LMUL/EMUL - Most instructions can write to differently sized register groups
21/// depending on LMUL.
22///
23/// Masked vs Unmasked - Many instructions which allow a mask disallow register
24/// overlap.  As a result, masked vs unmasked require different register
25/// allocation constraints.
26///
27/// Policy - For each of mask and tail policy, there are three options:
28/// * "Undisturbed" - As defined in the specification, required to preserve the
29/// exact bit pattern of inactive lanes.
30/// * "Agnostic" - As defined in the specification, required to either preserve
31/// the exact bit pattern of inactive lanes, or produce the bit pattern -1 for
32/// those lanes.  Note that each lane can make this choice independently.
33/// Instructions which produce masks (and only those instructions) also have the
34/// option of producing a result as-if VL had been VLMAX.
35/// * "Undefined" - The bit pattern of the inactive lanes is unspecified, and
36/// can be changed without impacting the semantics of the program.  Note that
37/// this concept does not exist in the specification, and requires source
38/// knowledge to be preserved.
39///
40/// SEW - Some instructions have semantics which depend on SEW.  This is
41/// relatively rare, and mostly impacts scheduling and cost estimation.
42///
43/// We have two techniques we use to represent the impact of these fields:
44/// * For fields which don't impact register classes, we largely use
45/// dummy operands on the pseudo instructions which convey information
46/// about the value of VTYPE.
47/// * For fields which do impact register classes (and a few bits of
48/// legacy - see policy discussion below), we define a family of pseudo
49/// instructions for each actual instruction.  Said differently, we encode
50/// each of the preceding fields which are relevant for a given instruction
51/// in the opcode space.
52///
53/// Currently, the policy is represented via the following intrinsic families:
54/// * _MASK - Can represent all three policy states for both tail and mask.  If
55///   passthrough is IMPLICIT_DEF (or NoReg), then represents "undefined".
56///   Otherwise, policy operand and tablegen flags drive the interpretation.
57///   (If policy operand is not present - there are a couple, though we're
58///   rapidly removing them - a non-undefined policy defaults to "tail
59///   agnostic", and "mask undisturbed".  Since this is the only variant with
60///   a mask, all other variants are "mask undefined".
61/// * Unsuffixed w/ both passthrough and policy operand. Can represent all
62///   three policy states.  If passthrough is IMPLICIT_DEF (or NoReg), then
63///   represents "undefined".  Otherwise, policy operand and tablegen flags
64///   drive the interpretation.
65/// * Unsuffixed w/o passthrough or policy operand -- Does not have a
66///   passthrough operand, and thus represents the "undefined" state.  Note
67///   that terminology in code frequently refers to these as "TA" which is
68///   confusing.  We're in the process of migrating away from this
69///   representation.
70///
71//===----------------------------------------------------------------------===//
72
73def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
74                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
75                                                SDTCisInt<1>]>>;
76def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
77                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
78
79// Operand that is allowed to be a register other than X0, a 5 bit unsigned
80// immediate, or -1. -1 means VLMAX. This allows us to pick between VSETIVLI and
81// VSETVLI opcodes using the same pseudo instructions.
82def AVL : RegisterOperand<GPRNoX0> {
83  let OperandNamespace = "RISCVOp";
84  let OperandType = "OPERAND_AVL";
85}
86
87def vec_policy : RISCVOp {
88  let OperandType = "OPERAND_VEC_POLICY";
89}
90
91def sew : RISCVOp {
92  let OperandType = "OPERAND_SEW";
93}
94
95// SEW for mask only instructions like vmand and vmsbf. Should always be 0.
96def sew_mask : RISCVOp {
97  let OperandType = "OPERAND_SEW_MASK";
98}
99
100def vec_rm : RISCVOp {
101  let OperandType = "OPERAND_VEC_RM";
102}
103
104// X0 has special meaning for vsetvl/vsetvli.
105//  rd | rs1 |   AVL value | Effect on vl
106//--------------------------------------------------------------
107// !X0 |  X0 |       VLMAX | Set vl to VLMAX
108//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
109def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
110// FIXME: This is labelled as handling 's32', however the ComplexPattern it
111// refers to handles both i32 and i64 based on the HwMode. Currently this LLT
112// parameter appears to be ignored so this pattern works for both, however we
113// should add a LowLevelTypeByHwMode, and use that to define our XLenLLT instead
114// here.
115def GIVLOp : GIComplexOperandMatcher<s32, "renderVLOp">,
116             GIComplexPatternEquiv<VLOp>;
117
118def DecImm : SDNodeXForm<imm, [{
119  return CurDAG->getSignedTargetConstant(N->getSExtValue() - 1, SDLoc(N),
120                                         N->getValueType(0));
121}]>;
122
123defvar TAIL_AGNOSTIC = 1;
124defvar TU_MU = 0;
125defvar TA_MA = 3;
126
127//===----------------------------------------------------------------------===//
128// Utilities.
129//===----------------------------------------------------------------------===//
130
131class PseudoToVInst<string PseudoInst> {
132  defvar AffixSubsts = [["Pseudo", ""],
133                        ["_E64", ""],
134                        ["_E32", ""],
135                        ["_E16", ""],
136                        ["_E8", ""],
137                        ["FPR64", "F"],
138                        ["FPR32", "F"],
139                        ["FPR16", "F"],
140                        ["_TIED", ""],
141                        ["_MASK", ""],
142                        ["_B64", ""],
143                        ["_B32", ""],
144                        ["_B16", ""],
145                        ["_B8", ""],
146                        ["_B4", ""],
147                        ["_B2", ""],
148                        ["_B1", ""],
149                        ["_MF8", ""],
150                        ["_MF4", ""],
151                        ["_MF2", ""],
152                        ["_M1", ""],
153                        ["_M2", ""],
154                        ["_M4", ""],
155                        ["_M8", ""],
156                        ["_SE", ""],
157                        ["_RM", ""]
158                       ];
159  string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
160                        !subst(AffixSubst[0], AffixSubst[1], Acc));
161}
162
163// This class describes information associated to the LMUL.
164class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
165               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
166  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
167  VReg vrclass = regclass;
168  VReg wvrclass = wregclass;
169  VReg f8vrclass = f8regclass;
170  VReg f4vrclass = f4regclass;
171  VReg f2vrclass = f2regclass;
172  string MX = mx;
173  int octuple = oct;
174}
175
176// Associate LMUL with tablegen records of register classes.
177def V_M1  : LMULInfo<0b000,  8,   VR,        VRM2,   VR,   VR, VR, "M1">;
178def V_M2  : LMULInfo<0b001, 16, VRM2,        VRM4,   VR,   VR, VR, "M2">;
179def V_M4  : LMULInfo<0b010, 32, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
180def V_M8  : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;
181
182def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
183def V_MF4 : LMULInfo<0b110, 2, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
184def V_MF2 : LMULInfo<0b111, 4, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;
185
186// Used to iterate over all possible LMULs.
187defvar MxList = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
188// For floating point which don't need MF8.
189defvar MxListF = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
190
191// Used for widening and narrowing instructions as it doesn't contain M8.
192defvar MxListW = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
193// Used for widening reductions. It can contain M8 because wider operands are
194// scalar operands.
195defvar MxListWRed = MxList;
196// For floating point which don't need MF8.
197defvar MxListFW = [V_MF4, V_MF2, V_M1, V_M2, V_M4];
198// For widening floating-point Reduction as it doesn't contain MF8. It can
199// contain M8 because wider operands are scalar operands.
200defvar MxListFWRed = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
201
202// Use for zext/sext.vf2
203defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
204
205// Use for zext/sext.vf4 and vector crypto instructions
206defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8];
207
208// Use for zext/sext.vf8
209defvar MxListVF8 = [V_M1, V_M2, V_M4, V_M8];
210
211class MxSet<int eew> {
212  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
213                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
214                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
215                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
216}
217
218class FPR_Info<int sew> {
219  RegisterClass fprclass = !cast<RegisterClass>("FPR" # sew);
220  string FX = "FPR" # sew;
221  int SEW = sew;
222  list<LMULInfo> MxList = MxSet<sew>.m;
223  list<LMULInfo> MxListFW = !if(!eq(sew, 64), [], !listremove(MxList, [V_M8]));
224}
225
226def SCALAR_F16 : FPR_Info<16>;
227def SCALAR_F32 : FPR_Info<32>;
228def SCALAR_F64 : FPR_Info<64>;
229
230// BF16 uses the same register class as F16.
231def SCALAR_BF16 : FPR_Info<16>;
232
233defvar FPList = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
234
235// Used for widening instructions. It excludes F64.
236defvar FPListW = [SCALAR_F16, SCALAR_F32];
237
238// Used for widening bf16 instructions.
239defvar BFPListW = [SCALAR_BF16];
240
241class NFSet<LMULInfo m> {
242  defvar lmul = !shl(1, m.value);
243  list<int> L = NFList<lmul>.L;
244}
245
246class octuple_to_str<int octuple> {
247  string ret = !cond(!eq(octuple, 1): "MF8",
248                     !eq(octuple, 2): "MF4",
249                     !eq(octuple, 4): "MF2",
250                     !eq(octuple, 8): "M1",
251                     !eq(octuple, 16): "M2",
252                     !eq(octuple, 32): "M4",
253                     !eq(octuple, 64): "M8");
254}
255
256def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
257
258// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
259// We can't use X0 register because the AVL operands use GPRNoX0.
260// This must be kept in sync with RISCV::VLMaxSentinel.
261def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
262
263def SelectScalarFPAsInt : ComplexPattern<fAny, 1, "selectScalarFPAsInt", [], [],
264                                         1>;
265
266// List of EEW.
267defvar EEWList = [8, 16, 32, 64];
268
269class SegRegClass<LMULInfo m, int nf> {
270  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
271                                           !eq(m.value, V_MF4.value): V_M1.MX,
272                                           !eq(m.value, V_MF2.value): V_M1.MX,
273                                           true: m.MX));
274}
275
276//===----------------------------------------------------------------------===//
277// Vector register and vector group type information.
278//===----------------------------------------------------------------------===//
279
280class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, LMULInfo M,
281                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR> {
282  ValueType Vector = Vec;
283  ValueType Mask = Mas;
284  int SEW = Sew;
285  int Log2SEW = !logtwo(Sew);
286  VReg RegClass = M.vrclass;
287  LMULInfo LMul = M;
288  ValueType Scalar = Scal;
289  RegisterClass ScalarRegClass = ScalarReg;
290  // The pattern fragment which produces the AVL operand, representing the
291  // "natural" vector length for this type. For scalable vectors this is VLMax.
292  OutPatFrag AVL = VLMax;
293
294  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
295                              !eq(Scal, f16) : "FPR16",
296                              !eq(Scal, bf16) : "FPR16",
297                              !eq(Scal, f32) : "FPR32",
298                              !eq(Scal, f64) : "FPR64");
299}
300
301class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
302                     LMULInfo M, ValueType Scal = XLenVT,
303                     RegisterClass ScalarReg = GPR>
304    : VTypeInfo<Vec, Mas, Sew, M, Scal, ScalarReg> {
305  ValueType VectorM1 = VecM1;
306}
307
308defset list<VTypeInfo> AllVectors = {
309  defset list<VTypeInfo> AllIntegerVectors = {
310    defset list<VTypeInfo> NoGroupIntegerVectors = {
311      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
312        def VI8MF8:  VTypeInfo<vint8mf8_t,  vbool64_t, 8,  V_MF8>;
313        def VI8MF4:  VTypeInfo<vint8mf4_t,  vbool32_t, 8,  V_MF4>;
314        def VI8MF2:  VTypeInfo<vint8mf2_t,  vbool16_t, 8,  V_MF2>;
315        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, V_MF4>;
316        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, V_MF2>;
317        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, V_MF2>;
318      }
319      def VI8M1:  VTypeInfo<vint8m1_t,  vbool8_t,   8, V_M1>;
320      def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, V_M1>;
321      def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, V_M1>;
322      def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, V_M1>;
323    }
324    defset list<GroupVTypeInfo> GroupIntegerVectors = {
325      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, V_M2>;
326      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, V_M4>;
327      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, V_M8>;
328
329      def VI16M2: GroupVTypeInfo<vint16m2_t, vint16m1_t, vbool8_t, 16, V_M2>;
330      def VI16M4: GroupVTypeInfo<vint16m4_t, vint16m1_t, vbool4_t, 16, V_M4>;
331      def VI16M8: GroupVTypeInfo<vint16m8_t, vint16m1_t, vbool2_t, 16, V_M8>;
332
333      def VI32M2: GroupVTypeInfo<vint32m2_t, vint32m1_t, vbool16_t, 32, V_M2>;
334      def VI32M4: GroupVTypeInfo<vint32m4_t, vint32m1_t, vbool8_t,  32, V_M4>;
335      def VI32M8: GroupVTypeInfo<vint32m8_t, vint32m1_t, vbool4_t,  32, V_M8>;
336
337      def VI64M2: GroupVTypeInfo<vint64m2_t, vint64m1_t, vbool32_t, 64, V_M2>;
338      def VI64M4: GroupVTypeInfo<vint64m4_t, vint64m1_t, vbool16_t, 64, V_M4>;
339      def VI64M8: GroupVTypeInfo<vint64m8_t, vint64m1_t, vbool8_t,  64, V_M8>;
340    }
341  }
342
343  defset list<VTypeInfo> AllFloatVectors = {
344    defset list<VTypeInfo> NoGroupFloatVectors = {
345      defset list<VTypeInfo> FractionalGroupFloatVectors = {
346        def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, V_MF4, f16, FPR16>;
347        def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, V_MF2, f16, FPR16>;
348        def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, V_MF2, f32, FPR32>;
349      }
350      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, V_M1, f16, FPR16>;
351      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, V_M1, f32, FPR32>;
352      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, V_M1, f64, FPR64>;
353    }
354
355    defset list<GroupVTypeInfo> GroupFloatVectors = {
356      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
357                                 V_M2, f16, FPR16>;
358      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
359                                 V_M4, f16, FPR16>;
360      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
361                                 V_M8, f16, FPR16>;
362
363      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
364                                 V_M2, f32, FPR32>;
365      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
366                                 V_M4, f32, FPR32>;
367      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
368                                 V_M8, f32, FPR32>;
369
370      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
371                                 V_M2, f64, FPR64>;
372      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
373                                 V_M4, f64, FPR64>;
374      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
375                                 V_M8, f64, FPR64>;
376    }
377  }
378
379  defset list<VTypeInfo> AllBFloatVectors = {
380    defset list<VTypeInfo> NoGroupBFloatVectors = {
381      defset list<VTypeInfo> FractionalGroupBFloatVectors = {
382        def VBF16MF4: VTypeInfo<vbfloat16mf4_t, vbool64_t, 16, V_MF4, bf16, FPR16>;
383        def VBF16MF2: VTypeInfo<vbfloat16mf2_t, vbool32_t, 16, V_MF2, bf16, FPR16>;
384      }
385      def VBF16M1:  VTypeInfo<vbfloat16m1_t, vbool16_t, 16, V_M1, bf16, FPR16>;
386    }
387
388    defset list<GroupVTypeInfo> GroupBFloatVectors = {
389      def VBF16M2: GroupVTypeInfo<vbfloat16m2_t, vbfloat16m1_t, vbool8_t, 16,
390                                  V_M2, bf16, FPR16>;
391      def VBF16M4: GroupVTypeInfo<vbfloat16m4_t, vbfloat16m1_t, vbool4_t, 16,
392                                  V_M4, bf16, FPR16>;
393      def VBF16M8: GroupVTypeInfo<vbfloat16m8_t, vbfloat16m1_t, vbool2_t, 16,
394                                  V_M8, bf16, FPR16>;
395    }
396  }
397}
398
399defvar AllFloatVectorsExceptFP16 = !filter(vti, AllFloatVectors, !ne(vti.Scalar, f16));
400defvar AllFP16Vectors = !filter(vti, AllFloatVectors, !eq(vti.Scalar, f16));
401
402// This functor is used to obtain the int vector type that has the same SEW and
403// multiplier as the input parameter type
404class GetIntVTypeInfo<VTypeInfo vti> {
405  // Equivalent integer vector type. Eg.
406  //   VI8M1 → VI8M1 (identity)
407  //   VF64M4 → VI64M4
408  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VBF", "VI",
409                                          !subst("VF", "VI",
410                                                 !cast<string>(vti))));
411}
412
413class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
414  ValueType Mask = Mas;
415  // {SEW, VLMul} values set a valid VType to deal with this mask type.
416  // we assume SEW=1 and set corresponding LMUL. vsetvli insertion will
417  // look for SEW=1 to optimize based on surrounding instructions.
418  int SEW = 1;
419  int Log2SEW = 0;
420  LMULInfo LMul = M;
421  string BX = Bx; // Appendix of mask operations.
422  // The pattern fragment which produces the AVL operand, representing the
423  // "natural" vector length for this mask type. For scalable masks this is
424  // VLMax.
425  OutPatFrag AVL = VLMax;
426}
427
428defset list<MTypeInfo> AllMasks = {
429  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
430  def : MTypeInfo<vbool64_t, V_MF8, "B64">;
431  def : MTypeInfo<vbool32_t, V_MF4, "B32">;
432  def : MTypeInfo<vbool16_t, V_MF2, "B16">;
433  def : MTypeInfo<vbool8_t, V_M1, "B8">;
434  def : MTypeInfo<vbool4_t, V_M2, "B4">;
435  def : MTypeInfo<vbool2_t, V_M4, "B2">;
436  def : MTypeInfo<vbool1_t, V_M8, "B1">;
437}
438
439class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti> {
440  VTypeInfo Vti = vti;
441  VTypeInfo Wti = wti;
442}
443
444class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti> {
445  VTypeInfo Vti = vti;
446  VTypeInfo Fti = fti;
447}
448
449defset list<VTypeInfoToWide> AllWidenableIntVectors = {
450  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
451  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
452  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
453  def : VTypeInfoToWide<VI8M1,   VI16M2>;
454  def : VTypeInfoToWide<VI8M2,   VI16M4>;
455  def : VTypeInfoToWide<VI8M4,   VI16M8>;
456
457  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
458  def : VTypeInfoToWide<VI16MF2, VI32M1>;
459  def : VTypeInfoToWide<VI16M1,  VI32M2>;
460  def : VTypeInfoToWide<VI16M2,  VI32M4>;
461  def : VTypeInfoToWide<VI16M4,  VI32M8>;
462
463  def : VTypeInfoToWide<VI32MF2, VI64M1>;
464  def : VTypeInfoToWide<VI32M1,  VI64M2>;
465  def : VTypeInfoToWide<VI32M2,  VI64M4>;
466  def : VTypeInfoToWide<VI32M4,  VI64M8>;
467}
468
469defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
470  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
471  def : VTypeInfoToWide<VF16MF2, VF32M1>;
472  def : VTypeInfoToWide<VF16M1, VF32M2>;
473  def : VTypeInfoToWide<VF16M2, VF32M4>;
474  def : VTypeInfoToWide<VF16M4, VF32M8>;
475
476  def : VTypeInfoToWide<VF32MF2, VF64M1>;
477  def : VTypeInfoToWide<VF32M1, VF64M2>;
478  def : VTypeInfoToWide<VF32M2, VF64M4>;
479  def : VTypeInfoToWide<VF32M4, VF64M8>;
480}
481
482defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
483  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
484  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
485  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
486  def : VTypeInfoToFraction<VI16M2, VI8M1>;
487  def : VTypeInfoToFraction<VI16M4, VI8M2>;
488  def : VTypeInfoToFraction<VI16M8, VI8M4>;
489  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
490  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
491  def : VTypeInfoToFraction<VI32M2, VI16M1>;
492  def : VTypeInfoToFraction<VI32M4, VI16M2>;
493  def : VTypeInfoToFraction<VI32M8, VI16M4>;
494  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
495  def : VTypeInfoToFraction<VI64M2, VI32M1>;
496  def : VTypeInfoToFraction<VI64M4, VI32M2>;
497  def : VTypeInfoToFraction<VI64M8, VI32M4>;
498}
499
500defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
501  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
502  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
503  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
504  def : VTypeInfoToFraction<VI32M4, VI8M1>;
505  def : VTypeInfoToFraction<VI32M8, VI8M2>;
506  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
507  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
508  def : VTypeInfoToFraction<VI64M4, VI16M1>;
509  def : VTypeInfoToFraction<VI64M8, VI16M2>;
510}
511
512defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
513  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
514  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
515  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
516  def : VTypeInfoToFraction<VI64M8, VI8M1>;
517}
518
519defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
520  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
521  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
522  def : VTypeInfoToWide<VI8MF2, VF16M1>;
523  def : VTypeInfoToWide<VI8M1, VF16M2>;
524  def : VTypeInfoToWide<VI8M2, VF16M4>;
525  def : VTypeInfoToWide<VI8M4, VF16M8>;
526
527  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
528  def : VTypeInfoToWide<VI16MF2, VF32M1>;
529  def : VTypeInfoToWide<VI16M1, VF32M2>;
530  def : VTypeInfoToWide<VI16M2, VF32M4>;
531  def : VTypeInfoToWide<VI16M4, VF32M8>;
532
533  def : VTypeInfoToWide<VI32MF2, VF64M1>;
534  def : VTypeInfoToWide<VI32M1, VF64M2>;
535  def : VTypeInfoToWide<VI32M2, VF64M4>;
536  def : VTypeInfoToWide<VI32M4, VF64M8>;
537}
538
539defset list<VTypeInfoToWide> AllWidenableBFloatToFloatVectors = {
540  def : VTypeInfoToWide<VBF16MF4, VF32MF2>;
541  def : VTypeInfoToWide<VBF16MF2, VF32M1>;
542  def : VTypeInfoToWide<VBF16M1, VF32M2>;
543  def : VTypeInfoToWide<VBF16M2, VF32M4>;
544  def : VTypeInfoToWide<VBF16M4, VF32M8>;
545}
546
547// This class holds the record of the RISCVVPseudoTable below.
548// This represents the information we need in codegen for each pseudo.
549// The definition should be consistent with `struct PseudoInfo` in
550// RISCVInstrInfo.h.
551class RISCVVPseudo {
552  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
553  Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
554  // SEW = 0 is used to denote that the Pseudo is not SEW specific (or unknown).
555  bits<8> SEW = 0;
556  bit NeedBeInPseudoTable = 1;
557}
558
559// The actual table.
560def RISCVVPseudosTable : GenericTable {
561  let FilterClass = "RISCVVPseudo";
562  let FilterClassField = "NeedBeInPseudoTable";
563  let CppTypeName = "PseudoInfo";
564  let Fields = [ "Pseudo", "BaseInstr" ];
565  let PrimaryKey = [ "Pseudo" ];
566  let PrimaryKeyName = "getPseudoInfo";
567  let PrimaryKeyEarlyOut = true;
568}
569
570def RISCVVInversePseudosTable : GenericTable {
571  let FilterClass = "RISCVVPseudo";
572  let CppTypeName = "PseudoInfo";
573  let Fields = [ "Pseudo", "BaseInstr", "VLMul", "SEW"];
574  let PrimaryKey = [ "BaseInstr", "VLMul", "SEW"];
575  let PrimaryKeyName = "getBaseInfo";
576  let PrimaryKeyEarlyOut = true;
577}
578
579def RISCVVIntrinsicsTable : GenericTable {
580  let FilterClass = "RISCVVIntrinsic";
581  let CppTypeName = "RISCVVIntrinsicInfo";
582  let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
583  let PrimaryKey = ["IntrinsicID"];
584  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
585}
586
587// Describes the relation of a masked pseudo to the unmasked variants.
588//    Note that all masked variants (in this table) have exactly one
589//    unmasked variant.  For all but compares, both the masked and
590//    unmasked variant have a passthru and policy operand.  For compares,
591//    neither has a policy op, and only the masked version has a passthru.
592class RISCVMaskedPseudo<bits<4> MaskIdx> {
593  Pseudo MaskedPseudo = !cast<Pseudo>(NAME);
594  Pseudo UnmaskedPseudo = !cast<Pseudo>(!subst("_MASK", "", NAME));
595  bits<4> MaskOpIdx = MaskIdx;
596}
597
598def RISCVMaskedPseudosTable : GenericTable {
599  let FilterClass = "RISCVMaskedPseudo";
600  let CppTypeName = "RISCVMaskedPseudoInfo";
601  let Fields = ["MaskedPseudo", "UnmaskedPseudo", "MaskOpIdx"];
602  let PrimaryKey = ["MaskedPseudo"];
603  let PrimaryKeyName = "getMaskedPseudoInfo";
604}
605
606class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
607  bits<1> Masked = M;
608  bits<1> Strided = Str;
609  bits<1> FF = F;
610  bits<3> Log2SEW = S;
611  bits<3> LMUL = L;
612  Pseudo Pseudo = !cast<Pseudo>(NAME);
613}
614
615def lookupMaskedIntrinsicByUnmasked : SearchIndex {
616  let Table = RISCVMaskedPseudosTable;
617  let Key = ["UnmaskedPseudo"];
618}
619
620def RISCVVLETable : GenericTable {
621  let FilterClass = "RISCVVLE";
622  let CppTypeName = "VLEPseudo";
623  let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
624  let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
625  let PrimaryKeyName = "getVLEPseudo";
626}
627
628class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
629  bits<1> Masked = M;
630  bits<1> Strided = Str;
631  bits<3> Log2SEW = S;
632  bits<3> LMUL = L;
633  Pseudo Pseudo = !cast<Pseudo>(NAME);
634}
635
636def RISCVVSETable : GenericTable {
637  let FilterClass = "RISCVVSE";
638  let CppTypeName = "VSEPseudo";
639  let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
640  let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
641  let PrimaryKeyName = "getVSEPseudo";
642}
643
644class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
645  bits<1> Masked = M;
646  bits<1> Ordered = O;
647  bits<3> Log2SEW = S;
648  bits<3> LMUL = L;
649  bits<3> IndexLMUL = IL;
650  Pseudo Pseudo = !cast<Pseudo>(NAME);
651}
652
653class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
654  RISCVVLX_VSX<M, O, S, L, IL>;
655class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
656  RISCVVLX_VSX<M, O, S, L, IL>;
657
658class RISCVVLX_VSXTable : GenericTable {
659  let CppTypeName = "VLX_VSXPseudo";
660  let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
661  let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
662}
663
664def RISCVVLXTable : RISCVVLX_VSXTable {
665  let FilterClass = "RISCVVLX";
666  let PrimaryKeyName = "getVLXPseudo";
667}
668
669def RISCVVSXTable : RISCVVLX_VSXTable {
670  let FilterClass = "RISCVVSX";
671  let PrimaryKeyName = "getVSXPseudo";
672}
673
674class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
675  bits<4> NF = N;
676  bits<1> Masked = M;
677  bits<1> Strided = Str;
678  bits<1> FF = F;
679  bits<3> Log2SEW = S;
680  bits<3> LMUL = L;
681  Pseudo Pseudo = !cast<Pseudo>(NAME);
682}
683
684def RISCVVLSEGTable : GenericTable {
685  let FilterClass = "RISCVVLSEG";
686  let CppTypeName = "VLSEGPseudo";
687  let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
688  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
689  let PrimaryKeyName = "getVLSEGPseudo";
690}
691
692class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
693  bits<4> NF = N;
694  bits<1> Masked = M;
695  bits<1> Ordered = O;
696  bits<3> Log2SEW = S;
697  bits<3> LMUL = L;
698  bits<3> IndexLMUL = IL;
699  Pseudo Pseudo = !cast<Pseudo>(NAME);
700}
701
702def RISCVVLXSEGTable : GenericTable {
703  let FilterClass = "RISCVVLXSEG";
704  let CppTypeName = "VLXSEGPseudo";
705  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
706  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
707  let PrimaryKeyName = "getVLXSEGPseudo";
708}
709
710class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
711  bits<4> NF = N;
712  bits<1> Masked = M;
713  bits<1> Strided = Str;
714  bits<3> Log2SEW = S;
715  bits<3> LMUL = L;
716  Pseudo Pseudo = !cast<Pseudo>(NAME);
717}
718
719def RISCVVSSEGTable : GenericTable {
720  let FilterClass = "RISCVVSSEG";
721  let CppTypeName = "VSSEGPseudo";
722  let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
723  let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
724  let PrimaryKeyName = "getVSSEGPseudo";
725}
726
727class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
728  bits<4> NF = N;
729  bits<1> Masked = M;
730  bits<1> Ordered = O;
731  bits<3> Log2SEW = S;
732  bits<3> LMUL = L;
733  bits<3> IndexLMUL = IL;
734  Pseudo Pseudo = !cast<Pseudo>(NAME);
735}
736
737def RISCVVSXSEGTable : GenericTable {
738  let FilterClass = "RISCVVSXSEG";
739  let CppTypeName = "VSXSEGPseudo";
740  let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
741  let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
742  let PrimaryKeyName = "getVSXSEGPseudo";
743}
744
745//===----------------------------------------------------------------------===//
746// Helpers to define the different pseudo instructions.
747//===----------------------------------------------------------------------===//
748
749// The destination vector register group for a masked vector instruction cannot
750// overlap the source mask register (v0), unless the destination vector register
751// is being written with a mask value (e.g., comparisons) or the scalar result
752// of a reduction.
753class GetVRegNoV0<VReg VRegClass> {
754  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
755                 !eq(VRegClass, VRM2) : VRM2NoV0,
756                 !eq(VRegClass, VRM4) : VRM4NoV0,
757                 !eq(VRegClass, VRM8) : VRM8NoV0,
758                 !eq(VRegClass, VRN2M1) : VRN2M1NoV0,
759                 !eq(VRegClass, VRN2M2) : VRN2M2NoV0,
760                 !eq(VRegClass, VRN2M4) : VRN2M4NoV0,
761                 !eq(VRegClass, VRN3M1) : VRN3M1NoV0,
762                 !eq(VRegClass, VRN3M2) : VRN3M2NoV0,
763                 !eq(VRegClass, VRN4M1) : VRN4M1NoV0,
764                 !eq(VRegClass, VRN4M2) : VRN4M2NoV0,
765                 !eq(VRegClass, VRN5M1) : VRN5M1NoV0,
766                 !eq(VRegClass, VRN6M1) : VRN6M1NoV0,
767                 !eq(VRegClass, VRN7M1) : VRN7M1NoV0,
768                 !eq(VRegClass, VRN8M1) : VRN8M1NoV0,
769                 true : VRegClass);
770}
771
772class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins, int sew = 0> :
773      Pseudo<outs, ins, []>, RISCVVPseudo {
774  let BaseInstr = instr;
775  let VLMul = m.value;
776  let SEW = sew;
777}
778
779class GetVTypePredicates<VTypeInfo vti> {
780  list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVInstructionsF16],
781                                     !eq(vti.Scalar, bf16) : [HasVInstructionsBF16Minimal],
782                                     !eq(vti.Scalar, f32) : [HasVInstructionsAnyF],
783                                     !eq(vti.Scalar, f64) : [HasVInstructionsF64],
784                                     !eq(vti.SEW, 64) : [HasVInstructionsI64],
785                                     true : [HasVInstructions]);
786}
787
788class VPseudoUSLoadNoMask<VReg RetClass,
789                          int EEW,
790                          DAGOperand sewop = sew> :
791      Pseudo<(outs RetClass:$rd),
792             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, sewop:$sew,
793                  vec_policy:$policy), []>,
794      RISCVVPseudo,
795      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
796  let mayLoad = 1;
797  let mayStore = 0;
798  let hasSideEffects = 0;
799  let HasVLOp = 1;
800  let HasSEWOp = 1;
801  let HasVecPolicyOp = 1;
802  let Constraints = "$rd = $dest";
803}
804
805class VPseudoUSLoadMask<VReg RetClass,
806                        int EEW> :
807      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
808             (ins GetVRegNoV0<RetClass>.R:$passthru,
809                  GPRMem:$rs1,
810                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
811      RISCVVPseudo,
812      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
813  let mayLoad = 1;
814  let mayStore = 0;
815  let hasSideEffects = 0;
816  let Constraints = "$rd = $passthru";
817  let HasVLOp = 1;
818  let HasSEWOp = 1;
819  let HasVecPolicyOp = 1;
820  let UsesMaskPolicy = 1;
821}
822
823class VPseudoUSLoadFFNoMask<VReg RetClass,
824                            int EEW> :
825      Pseudo<(outs RetClass:$rd, GPR:$vl),
826             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
827                  sew:$sew, vec_policy:$policy), []>,
828      RISCVVPseudo,
829      RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
830  let mayLoad = 1;
831  let mayStore = 0;
832  let hasSideEffects = 0;
833  let HasVLOp = 1;
834  let HasSEWOp = 1;
835  let HasVecPolicyOp = 1;
836  let Constraints = "$rd = $dest";
837}
838
839class VPseudoUSLoadFFMask<VReg RetClass,
840                          int EEW> :
841      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
842             (ins GetVRegNoV0<RetClass>.R:$passthru,
843                  GPRMem:$rs1,
844                  VMaskOp:$vm, AVL:$avl, sew:$sew, vec_policy:$policy), []>,
845      RISCVVPseudo,
846      RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
847  let mayLoad = 1;
848  let mayStore = 0;
849  let hasSideEffects = 0;
850  let Constraints = "$rd = $passthru";
851  let HasVLOp = 1;
852  let HasSEWOp = 1;
853  let HasVecPolicyOp = 1;
854  let UsesMaskPolicy = 1;
855}
856
857class VPseudoSLoadNoMask<VReg RetClass,
858                         int EEW> :
859      Pseudo<(outs RetClass:$rd),
860             (ins RetClass:$dest, GPRMem:$rs1, GPR:$rs2, AVL:$vl,
861                  sew:$sew, vec_policy:$policy), []>,
862      RISCVVPseudo,
863      RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
864  let mayLoad = 1;
865  let mayStore = 0;
866  let hasSideEffects = 0;
867  let HasVLOp = 1;
868  let HasSEWOp = 1;
869  let HasVecPolicyOp = 1;
870  let Constraints = "$rd = $dest";
871}
872
873class VPseudoSLoadMask<VReg RetClass,
874                       int EEW> :
875      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
876             (ins GetVRegNoV0<RetClass>.R:$passthru,
877                  GPRMem:$rs1, GPR:$rs2,
878                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
879      RISCVVPseudo,
880      RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
881  let mayLoad = 1;
882  let mayStore = 0;
883  let hasSideEffects = 0;
884  let Constraints = "$rd = $passthru";
885  let HasVLOp = 1;
886  let HasSEWOp = 1;
887  let HasVecPolicyOp = 1;
888  let UsesMaskPolicy = 1;
889}
890
891class VPseudoILoadNoMask<VReg RetClass,
892                         VReg IdxClass,
893                         int EEW,
894                         bits<3> LMUL,
895                         bit Ordered,
896                         bit EarlyClobber,
897                         bits<2> TargetConstraintType = 1> :
898      Pseudo<(outs RetClass:$rd),
899             (ins RetClass:$dest, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
900                  sew:$sew, vec_policy:$policy), []>,
901      RISCVVPseudo,
902      RISCVVLX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
903  let mayLoad = 1;
904  let mayStore = 0;
905  let hasSideEffects = 0;
906  let HasVLOp = 1;
907  let HasSEWOp = 1;
908  let HasVecPolicyOp = 1;
909  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest");
910  let TargetOverlapConstraintType = TargetConstraintType;
911}
912
913class VPseudoILoadMask<VReg RetClass,
914                       VReg IdxClass,
915                       int EEW,
916                       bits<3> LMUL,
917                       bit Ordered,
918                       bit EarlyClobber,
919                       bits<2> TargetConstraintType = 1> :
920      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
921             (ins GetVRegNoV0<RetClass>.R:$passthru,
922                  GPRMem:$rs1, IdxClass:$rs2,
923                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
924      RISCVVPseudo,
925      RISCVVLX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
926  let mayLoad = 1;
927  let mayStore = 0;
928  let hasSideEffects = 0;
929  let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $passthru", "$rd = $passthru");
930  let TargetOverlapConstraintType = TargetConstraintType;
931  let HasVLOp = 1;
932  let HasSEWOp = 1;
933  let HasVecPolicyOp = 1;
934  let UsesMaskPolicy = 1;
935}
936
937class VPseudoUSStoreNoMask<VReg StClass,
938                           int EEW,
939                           DAGOperand sewop = sew> :
940      Pseudo<(outs),
941             (ins StClass:$rd, GPRMem:$rs1, AVL:$vl, sewop:$sew), []>,
942      RISCVVPseudo,
943      RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
944  let mayLoad = 0;
945  let mayStore = 1;
946  let hasSideEffects = 0;
947  let HasVLOp = 1;
948  let HasSEWOp = 1;
949}
950
951class VPseudoUSStoreMask<VReg StClass,
952                         int EEW> :
953      Pseudo<(outs),
954             (ins StClass:$rd, GPRMem:$rs1,
955                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
956      RISCVVPseudo,
957      RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
958  let mayLoad = 0;
959  let mayStore = 1;
960  let hasSideEffects = 0;
961  let HasVLOp = 1;
962  let HasSEWOp = 1;
963}
964
965class VPseudoSStoreNoMask<VReg StClass,
966                          int EEW> :
967      Pseudo<(outs),
968             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
969                  AVL:$vl, sew:$sew), []>,
970      RISCVVPseudo,
971      RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
972  let mayLoad = 0;
973  let mayStore = 1;
974  let hasSideEffects = 0;
975  let HasVLOp = 1;
976  let HasSEWOp = 1;
977}
978
979class VPseudoSStoreMask<VReg StClass,
980                        int EEW> :
981      Pseudo<(outs),
982             (ins StClass:$rd, GPRMem:$rs1, GPR:$rs2,
983                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
984      RISCVVPseudo,
985      RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
986  let mayLoad = 0;
987  let mayStore = 1;
988  let hasSideEffects = 0;
989  let HasVLOp = 1;
990  let HasSEWOp = 1;
991}
992
993class VPseudoNullaryNoMask<VReg RegClass> :
994      Pseudo<(outs RegClass:$rd),
995             (ins RegClass:$passthru,
996                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
997      RISCVVPseudo {
998  let mayLoad = 0;
999  let mayStore = 0;
1000  let hasSideEffects = 0;
1001  let Constraints = "$rd = $passthru";
1002  let HasVLOp = 1;
1003  let HasSEWOp = 1;
1004  let HasVecPolicyOp = 1;
1005}
1006
1007class VPseudoNullaryMask<VReg RegClass> :
1008      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
1009             (ins GetVRegNoV0<RegClass>.R:$passthru,
1010                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1011      RISCVVPseudo {
1012  let mayLoad = 0;
1013  let mayStore = 0;
1014  let hasSideEffects = 0;
1015  let Constraints ="$rd = $passthru";
1016  let HasVLOp = 1;
1017  let HasSEWOp = 1;
1018  let UsesMaskPolicy = 1;
1019  let HasVecPolicyOp = 1;
1020}
1021
1022// Nullary for pseudo instructions. They are expanded in
1023// RISCVExpandPseudoInsts pass.
1024class VPseudoNullaryPseudoM<string BaseInst> :
1025      Pseudo<(outs VR:$rd), (ins AVL:$vl, sew_mask:$sew), []>,
1026      RISCVVPseudo {
1027  let mayLoad = 0;
1028  let mayStore = 0;
1029  let hasSideEffects = 0;
1030  let HasVLOp = 1;
1031  let HasSEWOp = 1;
1032  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
1033  // Just fill a corresponding real v-inst to pass tablegen check.
1034  let BaseInstr = !cast<Instruction>(BaseInst);
1035  // We exclude them from RISCVVPseudoTable.
1036  let NeedBeInPseudoTable = 0;
1037}
1038
1039class VPseudoUnaryNoMask<DAGOperand RetClass,
1040                         DAGOperand OpClass,
1041                         string Constraint = "",
1042                         bits<2> TargetConstraintType = 1> :
1043      Pseudo<(outs RetClass:$rd),
1044             (ins RetClass:$passthru, OpClass:$rs2,
1045                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1046      RISCVVPseudo {
1047  let mayLoad = 0;
1048  let mayStore = 0;
1049  let hasSideEffects = 0;
1050  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1051  let TargetOverlapConstraintType = TargetConstraintType;
1052  let HasVLOp = 1;
1053  let HasSEWOp = 1;
1054  let HasVecPolicyOp = 1;
1055}
1056
1057class VPseudoUnaryNoMaskNoPolicy<DAGOperand RetClass,
1058                                 DAGOperand OpClass,
1059                                 string Constraint = "",
1060                                 bits<2> TargetConstraintType = 1> :
1061      Pseudo<(outs RetClass:$rd),
1062             (ins OpClass:$rs2, AVL:$vl, sew_mask:$sew), []>,
1063      RISCVVPseudo {
1064  let mayLoad = 0;
1065  let mayStore = 0;
1066  let hasSideEffects = 0;
1067  let Constraints = Constraint;
1068  let TargetOverlapConstraintType = TargetConstraintType;
1069  let HasVLOp = 1;
1070  let HasSEWOp = 1;
1071}
1072
1073class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
1074                                     DAGOperand OpClass,
1075                                     string Constraint = "",
1076                                     bits<2> TargetConstraintType = 1> :
1077      Pseudo<(outs RetClass:$rd),
1078             (ins RetClass:$passthru, OpClass:$rs2, vec_rm:$rm,
1079                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1080      RISCVVPseudo {
1081  let mayLoad = 0;
1082  let mayStore = 0;
1083  let hasSideEffects = 0;
1084  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1085  let TargetOverlapConstraintType = TargetConstraintType;
1086  let HasVLOp = 1;
1087  let HasSEWOp = 1;
1088  let HasVecPolicyOp = 1;
1089  let HasRoundModeOp = 1;
1090  let UsesVXRM = 0;
1091  let hasPostISelHook = 1;
1092}
1093
1094class VPseudoUnaryMask<VReg RetClass,
1095                       VReg OpClass,
1096                       string Constraint = "",
1097                       bits<2> TargetConstraintType = 1,
1098                       DAGOperand sewop = sew> :
1099      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1100             (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
1101                  VMaskOp:$vm, AVL:$vl, sewop:$sew, vec_policy:$policy), []>,
1102      RISCVVPseudo {
1103  let mayLoad = 0;
1104  let mayStore = 0;
1105  let hasSideEffects = 0;
1106  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1107  let TargetOverlapConstraintType = TargetConstraintType;
1108  let HasVLOp = 1;
1109  let HasSEWOp = 1;
1110  let HasVecPolicyOp = 1;
1111  let UsesMaskPolicy = 1;
1112}
1113
1114class VPseudoUnaryMaskRoundingMode<VReg RetClass,
1115                                   VReg OpClass,
1116                                   string Constraint = "",
1117                                   bits<2> TargetConstraintType = 1> :
1118      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1119             (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
1120                  VMaskOp:$vm, vec_rm:$rm,
1121                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1122      RISCVVPseudo {
1123  let mayLoad = 0;
1124  let mayStore = 0;
1125  let hasSideEffects = 0;
1126  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1127  let TargetOverlapConstraintType = TargetConstraintType;
1128  let HasVLOp = 1;
1129  let HasSEWOp = 1;
1130  let HasVecPolicyOp = 1;
1131  let UsesMaskPolicy = 1;
1132  let HasRoundModeOp = 1;
1133  let UsesVXRM = 0;
1134  let hasPostISelHook = 1;
1135}
1136
1137class VPseudoUnaryMask_NoExcept<VReg RetClass,
1138                                VReg OpClass,
1139                                string Constraint = ""> :
1140      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1141             (ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
1142                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []> {
1143  let mayLoad = 0;
1144  let mayStore = 0;
1145  let hasSideEffects = 0;
1146  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1147  let HasVLOp = 1;
1148  let HasSEWOp = 1;
1149  let HasVecPolicyOp = 1;
1150  let UsesMaskPolicy = 1;
1151  let usesCustomInserter = 1;
1152}
1153
1154class VPseudoUnaryNoMaskGPROut :
1155      Pseudo<(outs GPR:$rd),
1156             (ins VR:$rs2, AVL:$vl, sew_mask:$sew), []>,
1157      RISCVVPseudo {
1158  let mayLoad = 0;
1159  let mayStore = 0;
1160  let hasSideEffects = 0;
1161  let HasVLOp = 1;
1162  let HasSEWOp = 1;
1163}
1164
1165class VPseudoUnaryMaskGPROut :
1166      Pseudo<(outs GPR:$rd),
1167             (ins VR:$rs1, VMaskOp:$vm, AVL:$vl, sew_mask:$sew), []>,
1168      RISCVVPseudo {
1169  let mayLoad = 0;
1170  let mayStore = 0;
1171  let hasSideEffects = 0;
1172  let HasVLOp = 1;
1173  let HasSEWOp = 1;
1174}
1175
1176// Mask can be V0~V31
1177class VPseudoUnaryAnyMask<VReg RetClass,
1178                          VReg Op1Class> :
1179      Pseudo<(outs RetClass:$rd),
1180             (ins RetClass:$passthru, Op1Class:$rs2,
1181                  VR:$vm, AVL:$vl, sew:$sew), []>,
1182      RISCVVPseudo {
1183  let mayLoad = 0;
1184  let mayStore = 0;
1185  let hasSideEffects = 0;
1186  let Constraints = "@earlyclobber $rd, $rd = $passthru";
1187  let HasVLOp = 1;
1188  let HasSEWOp = 1;
1189}
1190
1191class VPseudoBinaryNoMask<VReg RetClass,
1192                          VReg Op1Class,
1193                          DAGOperand Op2Class,
1194                          string Constraint,
1195                          bits<2> TargetConstraintType = 1,
1196                          DAGOperand sewop = sew> :
1197      Pseudo<(outs RetClass:$rd),
1198             (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew), []>,
1199      RISCVVPseudo {
1200  let mayLoad = 0;
1201  let mayStore = 0;
1202  let hasSideEffects = 0;
1203  let Constraints = Constraint;
1204  let TargetOverlapConstraintType = TargetConstraintType;
1205  let HasVLOp = 1;
1206  let HasSEWOp = 1;
1207}
1208
1209class VPseudoBinaryNoMaskPolicy<VReg RetClass,
1210                                VReg Op1Class,
1211                                DAGOperand Op2Class,
1212                                string Constraint,
1213                                bits<2> TargetConstraintType = 1> :
1214      Pseudo<(outs RetClass:$rd),
1215             (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
1216                  sew:$sew, vec_policy:$policy), []>,
1217      RISCVVPseudo {
1218  let mayLoad = 0;
1219  let mayStore = 0;
1220  let hasSideEffects = 0;
1221  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1222  let TargetOverlapConstraintType = TargetConstraintType;
1223  let HasVLOp = 1;
1224  let HasSEWOp = 1;
1225  let HasVecPolicyOp = 1;
1226}
1227
1228class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
1229                                      VReg Op1Class,
1230                                      DAGOperand Op2Class,
1231                                      string Constraint,
1232                                      bit UsesVXRM_ = 1,
1233                                      bits<2> TargetConstraintType = 1> :
1234      Pseudo<(outs RetClass:$rd),
1235             (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, vec_rm:$rm,
1236                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1237      RISCVVPseudo {
1238  let mayLoad = 0;
1239  let mayStore = 0;
1240  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1241  let TargetOverlapConstraintType = TargetConstraintType;
1242  let HasVLOp = 1;
1243  let HasSEWOp = 1;
1244  let HasVecPolicyOp = 1;
1245  let HasRoundModeOp = 1;
1246  let UsesVXRM = UsesVXRM_;
1247  let hasPostISelHook = !not(UsesVXRM_);
1248}
1249
1250class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
1251                                          RegisterClass Op1Class,
1252                                          DAGOperand Op2Class,
1253                                          string Constraint,
1254                                          bit UsesVXRM_,
1255                                          bits<2> TargetConstraintType = 1> :
1256      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1257             (ins GetVRegNoV0<RetClass>.R:$passthru,
1258                  Op1Class:$rs2, Op2Class:$rs1,
1259                  VMaskOp:$vm, vec_rm:$rm, AVL:$vl,
1260                  sew:$sew, vec_policy:$policy), []>,
1261      RISCVVPseudo {
1262  let mayLoad = 0;
1263  let mayStore = 0;
1264  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1265  let TargetOverlapConstraintType = TargetConstraintType;
1266  let HasVLOp = 1;
1267  let HasSEWOp = 1;
1268  let HasVecPolicyOp = 1;
1269  let UsesMaskPolicy = 1;
1270  let HasRoundModeOp = 1;
1271  let UsesVXRM = UsesVXRM_;
1272  let hasPostISelHook = !not(UsesVXRM_);
1273}
1274
1275// Special version of VPseudoBinaryNoMask where we pretend the first source is
1276// tied to the destination.
1277// This allows maskedoff and rs2 to be the same register.
1278class VPseudoTiedBinaryNoMask<VReg RetClass,
1279                              DAGOperand Op2Class,
1280                              string Constraint,
1281                              bits<2> TargetConstraintType = 1> :
1282      Pseudo<(outs RetClass:$rd),
1283             (ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, sew:$sew,
1284                  vec_policy:$policy), []>,
1285      RISCVVPseudo {
1286  let mayLoad = 0;
1287  let mayStore = 0;
1288  let hasSideEffects = 0;
1289  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
1290  let TargetOverlapConstraintType = TargetConstraintType;
1291  let HasVLOp = 1;
1292  let HasSEWOp = 1;
1293  let HasVecPolicyOp = 1;
1294  let isConvertibleToThreeAddress = 1;
1295  let IsTiedPseudo = 1;
1296}
1297
1298class VPseudoTiedBinaryNoMaskRoundingMode<VReg RetClass,
1299                                          DAGOperand Op2Class,
1300                                          string Constraint,
1301                                          bits<2> TargetConstraintType = 1> :
1302      Pseudo<(outs RetClass:$rd),
1303             (ins RetClass:$rs2, Op2Class:$rs1,
1304                  vec_rm:$rm,
1305                  AVL:$vl, sew:$sew,
1306                  vec_policy:$policy), []>,
1307      RISCVVPseudo {
1308  let mayLoad = 0;
1309  let mayStore = 0;
1310  let hasSideEffects = 0;
1311  let Constraints = !interleave([Constraint, "$rd = $rs2"], ",");
1312  let TargetOverlapConstraintType = TargetConstraintType;
1313  let HasVLOp = 1;
1314  let HasSEWOp = 1;
1315  let HasVecPolicyOp = 1;
1316  let isConvertibleToThreeAddress = 1;
1317  let IsTiedPseudo = 1;
1318  let HasRoundModeOp = 1;
1319  let UsesVXRM = 0;
1320  let hasPostISelHook = 1;
1321}
1322
1323class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1324                          bit Ordered>:
1325      Pseudo<(outs),
1326             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2, AVL:$vl,
1327                  sew:$sew),[]>,
1328      RISCVVPseudo,
1329      RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1330  let mayLoad = 0;
1331  let mayStore = 1;
1332  let hasSideEffects = 0;
1333  let HasVLOp = 1;
1334  let HasSEWOp = 1;
1335}
1336
1337class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1338                        bit Ordered>:
1339      Pseudo<(outs),
1340             (ins StClass:$rd, GPRMem:$rs1, IdxClass:$rs2,
1341                  VMaskOp:$vm, AVL:$vl, sew:$sew),[]>,
1342      RISCVVPseudo,
1343      RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1344  let mayLoad = 0;
1345  let mayStore = 1;
1346  let hasSideEffects = 0;
1347  let HasVLOp = 1;
1348  let HasSEWOp = 1;
1349}
1350
1351class VPseudoBinaryMaskPolicy<VReg RetClass,
1352                              RegisterClass Op1Class,
1353                              DAGOperand Op2Class,
1354                              string Constraint,
1355                              bits<2> TargetConstraintType = 1> :
1356      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1357             (ins GetVRegNoV0<RetClass>.R:$passthru,
1358                  Op1Class:$rs2, Op2Class:$rs1,
1359                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1360      RISCVVPseudo {
1361  let mayLoad = 0;
1362  let mayStore = 0;
1363  let hasSideEffects = 0;
1364  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1365  let TargetOverlapConstraintType = TargetConstraintType;
1366  let HasVLOp = 1;
1367  let HasSEWOp = 1;
1368  let HasVecPolicyOp = 1;
1369  let UsesMaskPolicy = 1;
1370}
1371
1372class VPseudoTernaryMaskPolicy<VReg RetClass,
1373                               RegisterClass Op1Class,
1374                               DAGOperand Op2Class> :
1375      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1376             (ins GetVRegNoV0<RetClass>.R:$passthru,
1377                  Op1Class:$rs2, Op2Class:$rs1,
1378                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1379      RISCVVPseudo {
1380  let mayLoad = 0;
1381  let mayStore = 0;
1382  let hasSideEffects = 0;
1383  let Constraints = "$rd = $passthru";
1384  let HasVLOp = 1;
1385  let HasSEWOp = 1;
1386  let HasVecPolicyOp = 1;
1387}
1388
1389class VPseudoTernaryMaskPolicyRoundingMode<VReg RetClass,
1390                                           RegisterClass Op1Class,
1391                                           DAGOperand Op2Class> :
1392      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1393             (ins GetVRegNoV0<RetClass>.R:$passthru,
1394                  Op1Class:$rs2, Op2Class:$rs1,
1395                  VMaskOp:$vm,
1396                  vec_rm:$rm,
1397                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1398      RISCVVPseudo {
1399  let mayLoad = 0;
1400  let mayStore = 0;
1401  let hasSideEffects = 0;
1402  let Constraints = "$rd = $passthru";
1403  let HasVLOp = 1;
1404  let HasSEWOp = 1;
1405  let HasVecPolicyOp = 1;
1406  let HasRoundModeOp = 1;
1407  let UsesVXRM = 0;
1408  let hasPostISelHook = 1;
1409}
1410
1411// Like VPseudoBinaryMaskPolicy, but output can be V0 and there is no policy.
1412class VPseudoBinaryMOutMask<VReg RetClass,
1413                            RegisterClass Op1Class,
1414                            DAGOperand Op2Class,
1415                            string Constraint,
1416                            bits<2> TargetConstraintType = 1> :
1417      Pseudo<(outs RetClass:$rd),
1418             (ins RetClass:$passthru,
1419                  Op1Class:$rs2, Op2Class:$rs1,
1420                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
1421      RISCVVPseudo {
1422  let mayLoad = 0;
1423  let mayStore = 0;
1424  let hasSideEffects = 0;
1425  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1426  let TargetOverlapConstraintType = TargetConstraintType;
1427  let HasVLOp = 1;
1428  let HasSEWOp = 1;
1429  let UsesMaskPolicy = 1;
1430}
1431
1432// Special version of VPseudoBinaryMaskPolicy where we pretend the first source
1433// is tied to the destination so we can workaround the earlyclobber constraint.
1434// This allows maskedoff and rs2 to be the same register.
1435class VPseudoTiedBinaryMask<VReg RetClass,
1436                            DAGOperand Op2Class,
1437                            string Constraint,
1438                            bits<2> TargetConstraintType = 1> :
1439      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1440             (ins GetVRegNoV0<RetClass>.R:$passthru,
1441                  Op2Class:$rs1,
1442                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1443      RISCVVPseudo {
1444  let mayLoad = 0;
1445  let mayStore = 0;
1446  let hasSideEffects = 0;
1447  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1448  let TargetOverlapConstraintType = TargetConstraintType;
1449  let HasVLOp = 1;
1450  let HasSEWOp = 1;
1451  let HasVecPolicyOp = 1;
1452  let UsesMaskPolicy = 1;
1453  let IsTiedPseudo = 1;
1454}
1455
1456class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
1457                                        DAGOperand Op2Class,
1458                                        string Constraint,
1459                                        bits<2> TargetConstraintType = 1> :
1460      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1461             (ins GetVRegNoV0<RetClass>.R:$passthru,
1462                  Op2Class:$rs1,
1463                  VMaskOp:$vm,
1464                  vec_rm:$rm,
1465                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1466      RISCVVPseudo {
1467  let mayLoad = 0;
1468  let mayStore = 0;
1469  let hasSideEffects = 0;
1470  let Constraints = !interleave([Constraint, "$rd = $passthru"], ",");
1471  let TargetOverlapConstraintType = TargetConstraintType;
1472  let HasVLOp = 1;
1473  let HasSEWOp = 1;
1474  let HasVecPolicyOp = 1;
1475  let UsesMaskPolicy = 1;
1476  let IsTiedPseudo = 1;
1477  let HasRoundModeOp = 1;
1478  let UsesVXRM = 0;
1479  let hasPostISelHook = 1;
1480}
1481
1482class VPseudoBinaryCarry<VReg RetClass,
1483                         VReg Op1Class,
1484                         DAGOperand Op2Class,
1485                         LMULInfo MInfo,
1486                         bit CarryIn,
1487                         string Constraint,
1488                         bits<2> TargetConstraintType = 1> :
1489      Pseudo<(outs RetClass:$rd),
1490             !if(CarryIn,
1491                (ins Op1Class:$rs2, Op2Class:$rs1,
1492                     VMV0:$carry, AVL:$vl, sew:$sew),
1493                (ins Op1Class:$rs2, Op2Class:$rs1,
1494                     AVL:$vl, sew:$sew)), []>,
1495      RISCVVPseudo {
1496  let mayLoad = 0;
1497  let mayStore = 0;
1498  let hasSideEffects = 0;
1499  let Constraints = Constraint;
1500  let TargetOverlapConstraintType = TargetConstraintType;
1501  let HasVLOp = 1;
1502  let HasSEWOp = 1;
1503  let VLMul = MInfo.value;
1504}
1505
1506class VPseudoTiedBinaryCarryIn<VReg RetClass,
1507                               VReg Op1Class,
1508                               DAGOperand Op2Class,
1509                               LMULInfo MInfo,
1510                               bits<2> TargetConstraintType = 1> :
1511      Pseudo<(outs RetClass:$rd),
1512             (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1,
1513                  VMV0:$carry, AVL:$vl, sew:$sew), []>,
1514      RISCVVPseudo {
1515  let mayLoad = 0;
1516  let mayStore = 0;
1517  let hasSideEffects = 0;
1518  let Constraints = "$rd = $passthru";
1519  let TargetOverlapConstraintType = TargetConstraintType;
1520  let HasVLOp = 1;
1521  let HasSEWOp = 1;
1522  let HasVecPolicyOp = 0;
1523  let VLMul = MInfo.value;
1524}
1525
1526class VPseudoTernaryNoMask<VReg RetClass,
1527                           RegisterClass Op1Class,
1528                           DAGOperand Op2Class,
1529                           string Constraint> :
1530      Pseudo<(outs RetClass:$rd),
1531             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1532                  AVL:$vl, sew:$sew), []>,
1533      RISCVVPseudo {
1534  let mayLoad = 0;
1535  let mayStore = 0;
1536  let hasSideEffects = 0;
1537  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1538  let HasVLOp = 1;
1539  let HasSEWOp = 1;
1540}
1541
1542class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
1543                                     RegisterClass Op1Class,
1544                                     DAGOperand Op2Class,
1545                                     string Constraint = "",
1546                                     bits<2> TargetConstraintType = 1> :
1547      Pseudo<(outs RetClass:$rd),
1548             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1549                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1550      RISCVVPseudo {
1551  let mayLoad = 0;
1552  let mayStore = 0;
1553  let hasSideEffects = 0;
1554  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1555  let TargetOverlapConstraintType = TargetConstraintType;
1556  let HasVecPolicyOp = 1;
1557  let HasVLOp = 1;
1558  let HasSEWOp = 1;
1559}
1560
1561class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass,
1562                                                 RegisterClass Op1Class,
1563                                                 DAGOperand Op2Class,
1564                                                 string Constraint = "",
1565                                                 bits<2> TargetConstraintType = 1> :
1566      Pseudo<(outs RetClass:$rd),
1567             (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
1568                  vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1569      RISCVVPseudo {
1570  let mayLoad = 0;
1571  let mayStore = 0;
1572  let hasSideEffects = 0;
1573  let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
1574  let TargetOverlapConstraintType = TargetConstraintType;
1575  let HasVecPolicyOp = 1;
1576  let HasVLOp = 1;
1577  let HasSEWOp = 1;
1578  let HasRoundModeOp = 1;
1579  let UsesVXRM = 0;
1580  let hasPostISelHook = 1;
1581}
1582
1583class VPseudoUSSegLoadNoMask<VReg RetClass,
1584                             int EEW,
1585                             bits<4> NF> :
1586      Pseudo<(outs RetClass:$rd),
1587             (ins RetClass:$dest, GPRMem:$rs1, AVL:$vl,
1588                  sew:$sew, vec_policy:$policy), []>,
1589      RISCVVPseudo,
1590      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1591  let mayLoad = 1;
1592  let mayStore = 0;
1593  let hasSideEffects = 0;
1594  let HasVLOp = 1;
1595  let HasSEWOp = 1;
1596  let HasVecPolicyOp = 1;
1597  let Constraints = "$rd = $dest";
1598}
1599
1600class VPseudoUSSegLoadMask<VReg RetClass,
1601                           int EEW,
1602                           bits<4> NF> :
1603      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1604             (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
1605                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
1606      RISCVVPseudo,
1607      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1608  let mayLoad = 1;
1609  let mayStore = 0;
1610  let hasSideEffects = 0;
1611  let Constraints = "$rd = $passthru";
1612  let HasVLOp = 1;
1613  let HasSEWOp = 1;
1614  let HasVecPolicyOp = 1;
1615  let UsesMaskPolicy = 1;
1616}
1617
1618class VPseudoUSSegLoadFFNoMask<VReg RetClass,
1619                               int EEW,
1620                               bits<4> NF> :
1621      Pseudo<(outs RetClass:$rd, GPR:$vl),
1622             (ins RetClass:$dest, GPRMem:$rs1, AVL:$avl,
1623                  sew:$sew, vec_policy:$policy), []>,
1624      RISCVVPseudo,
1625      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1626  let mayLoad = 1;
1627  let mayStore = 0;
1628  let hasSideEffects = 0;
1629  let HasVLOp = 1;
1630  let HasSEWOp = 1;
1631  let HasVecPolicyOp = 1;
1632  let Constraints = "$rd = $dest";
1633}
1634
1635class VPseudoUSSegLoadFFMask<VReg RetClass,
1636                             int EEW,
1637                             bits<4> NF> :
1638      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
1639             (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
1640                  VMaskOp:$vm, AVL:$avl, sew:$sew, vec_policy:$policy), []>,
1641      RISCVVPseudo,
1642      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1643  let mayLoad = 1;
1644  let mayStore = 0;
1645  let hasSideEffects = 0;
1646  let Constraints = "$rd = $passthru";
1647  let HasVLOp = 1;
1648  let HasSEWOp = 1;
1649  let HasVecPolicyOp = 1;
1650  let UsesMaskPolicy = 1;
1651}
1652
1653class VPseudoSSegLoadNoMask<VReg RetClass,
1654                            int EEW,
1655                            bits<4> NF> :
1656      Pseudo<(outs RetClass:$rd),
1657             (ins RetClass:$passthru, GPRMem:$rs1, GPR:$offset, AVL:$vl,
1658                 sew:$sew, vec_policy:$policy), []>,
1659      RISCVVPseudo,
1660      RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1661  let mayLoad = 1;
1662  let mayStore = 0;
1663  let hasSideEffects = 0;
1664  let HasVLOp = 1;
1665  let HasSEWOp = 1;
1666  let HasVecPolicyOp = 1;
1667  let Constraints = "$rd = $passthru";
1668}
1669
1670class VPseudoSSegLoadMask<VReg RetClass,
1671                          int EEW,
1672                          bits<4> NF> :
1673      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1674             (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
1675                  GPR:$offset, VMaskOp:$vm, AVL:$vl, sew:$sew,
1676                  vec_policy:$policy), []>,
1677      RISCVVPseudo,
1678      RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1679  let mayLoad = 1;
1680  let mayStore = 0;
1681  let hasSideEffects = 0;
1682  let Constraints = "$rd = $passthru";
1683  let HasVLOp = 1;
1684  let HasSEWOp = 1;
1685  let HasVecPolicyOp = 1;
1686  let UsesMaskPolicy = 1;
1687}
1688
1689class VPseudoISegLoadNoMask<VReg RetClass,
1690                            VReg IdxClass,
1691                            int EEW,
1692                            bits<3> LMUL,
1693                            bits<4> NF,
1694                            bit Ordered> :
1695      Pseudo<(outs RetClass:$rd),
1696             (ins RetClass:$passthru, GPRMem:$rs1, IdxClass:$offset, AVL:$vl,
1697                  sew:$sew, vec_policy:$policy), []>,
1698      RISCVVPseudo,
1699      RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1700  let mayLoad = 1;
1701  let mayStore = 0;
1702  let hasSideEffects = 0;
1703  // For vector indexed segment loads, the destination vector register groups
1704  // cannot overlap the source vector register group
1705  let Constraints = "@earlyclobber $rd, $rd = $passthru";
1706  let HasVLOp = 1;
1707  let HasSEWOp = 1;
1708  let HasVecPolicyOp = 1;
1709}
1710
1711class VPseudoISegLoadMask<VReg RetClass,
1712                          VReg IdxClass,
1713                          int EEW,
1714                          bits<3> LMUL,
1715                          bits<4> NF,
1716                          bit Ordered> :
1717      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
1718             (ins GetVRegNoV0<RetClass>.R:$passthru, GPRMem:$rs1,
1719                  IdxClass:$offset, VMaskOp:$vm, AVL:$vl, sew:$sew,
1720                  vec_policy:$policy), []>,
1721      RISCVVPseudo,
1722      RISCVVLXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1723  let mayLoad = 1;
1724  let mayStore = 0;
1725  let hasSideEffects = 0;
1726  // For vector indexed segment loads, the destination vector register groups
1727  // cannot overlap the source vector register group
1728  let Constraints = "@earlyclobber $rd, $rd = $passthru";
1729  let HasVLOp = 1;
1730  let HasSEWOp = 1;
1731  let HasVecPolicyOp = 1;
1732  let UsesMaskPolicy = 1;
1733}
1734
1735class VPseudoUSSegStoreNoMask<VReg ValClass,
1736                              int EEW,
1737                              bits<4> NF> :
1738      Pseudo<(outs),
1739             (ins ValClass:$rd, GPRMem:$rs1, AVL:$vl, sew:$sew), []>,
1740      RISCVVPseudo,
1741      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
1742  let mayLoad = 0;
1743  let mayStore = 1;
1744  let hasSideEffects = 0;
1745  let HasVLOp = 1;
1746  let HasSEWOp = 1;
1747}
1748
1749class VPseudoUSSegStoreMask<VReg ValClass,
1750                            int EEW,
1751                            bits<4> NF> :
1752      Pseudo<(outs),
1753             (ins ValClass:$rd, GPRMem:$rs1,
1754                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
1755      RISCVVPseudo,
1756      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
1757  let mayLoad = 0;
1758  let mayStore = 1;
1759  let hasSideEffects = 0;
1760  let HasVLOp = 1;
1761  let HasSEWOp = 1;
1762}
1763
1764class VPseudoSSegStoreNoMask<VReg ValClass,
1765                             int EEW,
1766                             bits<4> NF> :
1767      Pseudo<(outs),
1768             (ins ValClass:$rd, GPRMem:$rs1, GPR:$offset,
1769                  AVL:$vl, sew:$sew), []>,
1770      RISCVVPseudo,
1771      RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
1772  let mayLoad = 0;
1773  let mayStore = 1;
1774  let hasSideEffects = 0;
1775  let HasVLOp = 1;
1776  let HasSEWOp = 1;
1777}
1778
1779class VPseudoSSegStoreMask<VReg ValClass,
1780                           int EEW,
1781                           bits<4> NF> :
1782      Pseudo<(outs),
1783             (ins ValClass:$rd, GPRMem:$rs1, GPR: $offset,
1784                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
1785      RISCVVPseudo,
1786      RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
1787  let mayLoad = 0;
1788  let mayStore = 1;
1789  let hasSideEffects = 0;
1790  let HasVLOp = 1;
1791  let HasSEWOp = 1;
1792}
1793
1794class VPseudoISegStoreNoMask<VReg ValClass,
1795                             VReg IdxClass,
1796                             int EEW,
1797                             bits<3> LMUL,
1798                             bits<4> NF,
1799                             bit Ordered> :
1800      Pseudo<(outs),
1801             (ins ValClass:$rd, GPRMem:$rs1, IdxClass: $index,
1802                  AVL:$vl, sew:$sew), []>,
1803      RISCVVPseudo,
1804      RISCVVSXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1805  let mayLoad = 0;
1806  let mayStore = 1;
1807  let hasSideEffects = 0;
1808  let HasVLOp = 1;
1809  let HasSEWOp = 1;
1810}
1811
1812class VPseudoISegStoreMask<VReg ValClass,
1813                           VReg IdxClass,
1814                           int EEW,
1815                           bits<3> LMUL,
1816                           bits<4> NF,
1817                           bit Ordered> :
1818      Pseudo<(outs),
1819             (ins ValClass:$rd, GPRMem:$rs1, IdxClass: $index,
1820                  VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
1821      RISCVVPseudo,
1822      RISCVVSXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1823  let mayLoad = 0;
1824  let mayStore = 1;
1825  let hasSideEffects = 0;
1826  let HasVLOp = 1;
1827  let HasSEWOp = 1;
1828}
1829
1830multiclass VPseudoUSLoad {
1831  foreach eew = EEWList in {
1832    foreach lmul = MxSet<eew>.m in {
1833      defvar LInfo = lmul.MX;
1834      defvar vreg = lmul.vrclass;
1835      let VLMul = lmul.value, SEW=eew in {
1836        def "E" # eew # "_V_" # LInfo :
1837          VPseudoUSLoadNoMask<vreg, eew>,
1838          VLESched<LInfo>;
1839        def "E" # eew # "_V_" # LInfo # "_MASK" :
1840          VPseudoUSLoadMask<vreg, eew>,
1841          RISCVMaskedPseudo<MaskIdx=2>,
1842          VLESched<LInfo>;
1843      }
1844    }
1845  }
1846}
1847
1848multiclass VPseudoFFLoad {
1849  foreach eew = EEWList in {
1850    foreach lmul = MxSet<eew>.m in {
1851      defvar LInfo = lmul.MX;
1852      defvar vreg = lmul.vrclass;
1853      let VLMul = lmul.value, SEW=eew in {
1854        def "E" # eew # "FF_V_" # LInfo:
1855          VPseudoUSLoadFFNoMask<vreg, eew>,
1856          VLFSched<LInfo>;
1857        def "E" # eew # "FF_V_" # LInfo # "_MASK":
1858          VPseudoUSLoadFFMask<vreg, eew>,
1859          RISCVMaskedPseudo<MaskIdx=2>,
1860          VLFSched<LInfo>;
1861      }
1862    }
1863  }
1864}
1865
1866multiclass VPseudoLoadMask {
1867  foreach mti = AllMasks in {
1868    defvar mx = mti.LMul.MX;
1869    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
1870    let VLMul = mti.LMul.value in {
1871      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1, sewop=sew_mask>,
1872        Sched<[WriteVLDM_MX, ReadVLDX]>;
1873    }
1874  }
1875}
1876
1877multiclass VPseudoSLoad {
1878  foreach eew = EEWList in {
1879    foreach lmul = MxSet<eew>.m in {
1880      defvar LInfo = lmul.MX;
1881      defvar vreg = lmul.vrclass;
1882      let VLMul = lmul.value, SEW=eew in {
1883        def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
1884                                        VLSSched<eew, LInfo>;
1885        def "E" # eew # "_V_" # LInfo # "_MASK" :
1886          VPseudoSLoadMask<vreg, eew>,
1887          RISCVMaskedPseudo<MaskIdx=3>,
1888          VLSSched<eew, LInfo>;
1889      }
1890    }
1891  }
1892}
1893
1894multiclass VPseudoILoad<bit Ordered> {
1895  foreach idxEEW = EEWList in {
1896    foreach dataEEW = EEWList in {
1897      foreach dataEMUL = MxSet<dataEEW>.m in {
1898        defvar dataEMULOctuple = dataEMUL.octuple;
1899        // Calculate emul = eew * lmul / sew
1900        defvar idxEMULOctuple =
1901          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
1902        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
1903          defvar DataLInfo = dataEMUL.MX;
1904          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
1905          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
1906          defvar Vreg = dataEMUL.vrclass;
1907          defvar IdxVreg = idxEMUL.vrclass;
1908          defvar HasConstraint = !ne(dataEEW, idxEEW);
1909          defvar TypeConstraints =
1910            !if(!eq(dataEEW, idxEEW), 1, !if(!gt(dataEEW, idxEEW), !if(!ge(idxEMULOctuple, 8), 3, 1), 2));
1911          let VLMul = dataEMUL.value in {
1912            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
1913              VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
1914              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1915            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
1916              VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint, TypeConstraints>,
1917              RISCVMaskedPseudo<MaskIdx=3>,
1918              VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1919          }
1920        }
1921      }
1922    }
1923  }
1924}
1925
1926multiclass VPseudoUSStore {
1927  foreach eew = EEWList in {
1928    foreach lmul = MxSet<eew>.m in {
1929      defvar LInfo = lmul.MX;
1930      defvar vreg = lmul.vrclass;
1931      let VLMul = lmul.value, SEW=eew in {
1932        def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
1933                                        VSESched<LInfo>;
1934        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
1935                                                  RISCVMaskedPseudo<MaskIdx=2>,
1936                                                  VSESched<LInfo>;
1937      }
1938    }
1939  }
1940}
1941
1942multiclass VPseudoStoreMask {
1943  foreach mti = AllMasks in {
1944    defvar mx = mti.LMul.MX;
1945    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
1946    let VLMul = mti.LMul.value in {
1947      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1, sewop=sew_mask>,
1948        Sched<[WriteVSTM_MX, ReadVSTX]>;
1949    }
1950  }
1951}
1952
1953multiclass VPseudoSStore {
1954  foreach eew = EEWList in {
1955    foreach lmul = MxSet<eew>.m in {
1956      defvar LInfo = lmul.MX;
1957      defvar vreg = lmul.vrclass;
1958      let VLMul = lmul.value, SEW=eew in {
1959        def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
1960                                        VSSSched<eew, LInfo>;
1961        def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
1962                                                  RISCVMaskedPseudo<MaskIdx=3>,
1963                                                  VSSSched<eew, LInfo>;
1964      }
1965    }
1966  }
1967}
1968
1969multiclass VPseudoIStore<bit Ordered> {
1970  foreach idxEEW = EEWList in {
1971    foreach dataEEW = EEWList in {
1972      foreach dataEMUL = MxSet<dataEEW>.m in {
1973        defvar dataEMULOctuple = dataEMUL.octuple;
1974        // Calculate emul = eew * lmul / sew
1975        defvar idxEMULOctuple =
1976          !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
1977        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
1978          defvar DataLInfo = dataEMUL.MX;
1979          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
1980          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
1981          defvar Vreg = dataEMUL.vrclass;
1982          defvar IdxVreg = idxEMUL.vrclass;
1983          let VLMul = dataEMUL.value in {
1984            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
1985              VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
1986              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1987            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
1988              VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
1989              RISCVMaskedPseudo<MaskIdx=3>,
1990              VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
1991          }
1992        }
1993      }
1994    }
1995  }
1996}
1997
1998multiclass VPseudoVPOP_M {
1999  foreach mti = AllMasks in {
2000    defvar mx = mti.LMul.MX;
2001    let VLMul = mti.LMul.value in {
2002      def "_M_" # mti.BX : VPseudoUnaryNoMaskGPROut,
2003          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
2004      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
2005          RISCVMaskedPseudo<MaskIdx=1>,
2006          SchedBinary<"WriteVMPopV", "ReadVMPopV", "ReadVMPopV", mx>;
2007    }
2008  }
2009}
2010
2011multiclass VPseudoV1ST_M {
2012  foreach mti = AllMasks in {
2013    defvar mx = mti.LMul.MX;
2014    let VLMul = mti.LMul.value in {
2015      def "_M_" #mti.BX : VPseudoUnaryNoMaskGPROut,
2016          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
2017      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskGPROut,
2018          RISCVMaskedPseudo<MaskIdx=1>,
2019          SchedBinary<"WriteVMFFSV", "ReadVMFFSV", "ReadVMFFSV", mx>;
2020    }
2021  }
2022}
2023
2024multiclass VPseudoVSFS_M {
2025  defvar constraint = "@earlyclobber $rd";
2026  foreach mti = AllMasks in {
2027    defvar mx = mti.LMul.MX;
2028    let VLMul = mti.LMul.value in {
2029      def "_M_" # mti.BX : VPseudoUnaryNoMaskNoPolicy<VR, VR, constraint>,
2030                           SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
2031                                      forcePassthruRead=true>;
2032      let ForceTailAgnostic = true in
2033      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint,
2034                                                      sewop = sew_mask>,
2035                                     SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
2036                                                forcePassthruRead=true>;
2037    }
2038  }
2039}
2040
2041multiclass VPseudoVID_V {
2042  foreach m = MxList in {
2043    defvar mx = m.MX;
2044    let VLMul = m.value in {
2045      def "_V_" # mx : VPseudoNullaryNoMask<m.vrclass>,
2046                         SchedNullary<"WriteVIdxV", mx, forcePassthruRead=true>;
2047      def "_V_" # mx # "_MASK" : VPseudoNullaryMask<m.vrclass>,
2048                                   RISCVMaskedPseudo<MaskIdx=1>,
2049                                   SchedNullary<"WriteVIdxV", mx,
2050                                                forcePassthruRead=true>;
2051    }
2052  }
2053}
2054
2055multiclass VPseudoNullaryPseudoM <string BaseInst> {
2056  foreach mti = AllMasks in {
2057    let VLMul = mti.LMul.value in {
2058      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
2059        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
2060    }
2061  }
2062}
2063
2064multiclass VPseudoVIOTA_M {
2065  defvar constraint = "@earlyclobber $rd";
2066  foreach m = MxList in {
2067    defvar mx = m.MX;
2068    let VLMul = m.value in {
2069      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
2070                       SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
2071                                  forcePassthruRead=true>;
2072      def "_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
2073                                 RISCVMaskedPseudo<MaskIdx=2>,
2074                                 SchedUnary<"WriteVIotaV", "ReadVIotaV", mx,
2075                                            forcePassthruRead=true>;
2076    }
2077  }
2078}
2079
2080multiclass VPseudoVCPR_V {
2081  foreach m = MxList in {
2082    defvar mx = m.MX;
2083    defvar sews = SchedSEWSet<mx>.val;
2084    let VLMul = m.value in
2085      foreach e = sews in {
2086        defvar suffix = "_" # m.MX # "_E" # e;
2087        let SEW = e in
2088        def _VM # suffix
2089          : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>,
2090            SchedBinary<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV",
2091                        mx, e>;
2092      }
2093  }
2094}
2095
2096multiclass VPseudoBinary<VReg RetClass,
2097                         VReg Op1Class,
2098                         DAGOperand Op2Class,
2099                         LMULInfo MInfo,
2100                         string Constraint = "",
2101                         int sew = 0,
2102                         bits<2> TargetConstraintType = 1,
2103                         bit Commutable = 0> {
2104  let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
2105    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2106    def suffix : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
2107                                           Constraint, TargetConstraintType>;
2108    def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
2109                                                   Constraint, TargetConstraintType>,
2110                           RISCVMaskedPseudo<MaskIdx=3>;
2111  }
2112}
2113
2114multiclass VPseudoBinaryRoundingMode<VReg RetClass,
2115                                     VReg Op1Class,
2116                                     DAGOperand Op2Class,
2117                                     LMULInfo MInfo,
2118                                     string Constraint = "",
2119                                     int sew = 0,
2120                                     bit UsesVXRM = 1,
2121                                     bits<2> TargetConstraintType = 1,
2122                                     bit Commutable = 0> {
2123  let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
2124    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2125    def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
2126                                                 Constraint, UsesVXRM,
2127                                                 TargetConstraintType>;
2128    def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
2129                                                               Op1Class,
2130                                                               Op2Class,
2131                                                               Constraint,
2132                                                               UsesVXRM,
2133                                                               TargetConstraintType>,
2134                           RISCVMaskedPseudo<MaskIdx=3>;
2135  }
2136}
2137
2138
2139multiclass VPseudoBinaryM<VReg RetClass,
2140                          VReg Op1Class,
2141                          DAGOperand Op2Class,
2142                          LMULInfo MInfo,
2143                          string Constraint = "",
2144                          bits<2> TargetConstraintType = 1,
2145                          bit Commutable = 0> {
2146  let VLMul = MInfo.value, isCommutable = Commutable in {
2147    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
2148                                             Constraint, TargetConstraintType>;
2149    let ForceTailAgnostic = true in
2150    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
2151                                                         Op2Class, Constraint, TargetConstraintType>,
2152                                   RISCVMaskedPseudo<MaskIdx=3>;
2153  }
2154}
2155
2156multiclass VPseudoBinaryEmul<VReg RetClass,
2157                             VReg Op1Class,
2158                             DAGOperand Op2Class,
2159                             LMULInfo lmul,
2160                             LMULInfo emul,
2161                             string Constraint = "",
2162                             int sew> {
2163  let VLMul = lmul.value, SEW=sew in {
2164    defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
2165    def suffix # "_" # emul.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
2166                                                           Constraint>;
2167    def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
2168                                                                          Constraint>,
2169                                                  RISCVMaskedPseudo<MaskIdx=3>;
2170  }
2171}
2172
2173multiclass VPseudoTiedBinary<VReg RetClass,
2174                             DAGOperand Op2Class,
2175                             LMULInfo MInfo,
2176                             string Constraint = "",
2177                             bits<2> TargetConstraintType = 1> {
2178  let VLMul = MInfo.value in {
2179    def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
2180                                                          Constraint, TargetConstraintType>;
2181    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
2182                                                         Constraint, TargetConstraintType>,
2183                                        RISCVMaskedPseudo<MaskIdx=2>;
2184  }
2185}
2186
2187multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
2188                                         DAGOperand Op2Class,
2189                                         LMULInfo MInfo,
2190                                         string Constraint = "",
2191                                         int sew = 0,
2192                                         bits<2> TargetConstraintType = 1> {
2193    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
2194    let VLMul = MInfo.value in {
2195    def suffix # "_TIED":
2196      VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
2197    def suffix # "_MASK_TIED" :
2198      VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>,
2199      RISCVMaskedPseudo<MaskIdx=2>;
2200  }
2201}
2202
2203
2204multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0, bit Commutable = 0> {
2205  defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew, Commutable=Commutable>;
2206}
2207
2208multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = "", bit Commutable = 0> {
2209  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m, Constraint,
2210                                       Commutable=Commutable>;
2211}
2212
2213multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, int sew> {
2214  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
2215                                       "", sew, UsesVXRM=0>;
2216}
2217
2218multiclass VPseudoVGTR_EI16_VV {
2219  defvar constraint = "@earlyclobber $rd";
2220  foreach m = MxList in {
2221    defvar mx = m.MX;
2222    foreach sew = EEWList in {
2223      defvar dataEMULOctuple = m.octuple;
2224      // emul = lmul * 16 / sew
2225      defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, 16), !logtwo(sew));
2226      if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
2227        defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
2228        defvar emul = !cast<LMULInfo>("V_" # emulMX);
2229        defvar sews = SchedSEWSet<mx>.val;
2230        foreach e = sews in {
2231          defm _VV
2232              : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul,
2233                                  constraint, e>,
2234                SchedBinary<"WriteVRGatherEI16VV", "ReadVRGatherEI16VV_data",
2235                            "ReadVRGatherEI16VV_index", mx, e, forcePassthruRead=true>;
2236        }
2237      }
2238    }
2239  }
2240}
2241
2242multiclass VPseudoBinaryV_VX<LMULInfo m, string Constraint = "", int sew = 0> {
2243  defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint, sew>;
2244}
2245
2246multiclass VPseudoBinaryV_VX_RM<LMULInfo m, string Constraint = ""> {
2247  defm "_VX" : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, GPR, m, Constraint>;
2248}
2249
2250multiclass VPseudoVSLD1_VX<string Constraint = ""> {
2251  foreach m = MxList in {
2252    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>,
2253                 SchedBinary<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX",
2254                             m.MX, forcePassthruRead=true>;
2255  }
2256}
2257
2258multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, int sew> {
2259  defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
2260                                   f.fprclass, m, "", sew>;
2261}
2262
2263multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, int sew> {
2264  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
2265                                               f.fprclass, m, "", sew,
2266                                               UsesVXRM=0>;
2267}
2268
2269multiclass VPseudoVSLD1_VF<string Constraint = ""> {
2270  foreach f = FPList in {
2271    foreach m = f.MxList in {
2272      defm "_V" #f.FX
2273          : VPseudoBinary<m.vrclass, m.vrclass, f.fprclass, m, Constraint>,
2274            SchedBinary<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF", m.MX,
2275                      forcePassthruRead=true>;
2276    }
2277  }
2278}
2279
2280multiclass VPseudoBinaryV_VI<Operand ImmType, LMULInfo m, string Constraint = ""> {
2281  defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
2282}
2283
2284multiclass VPseudoBinaryV_VI_RM<Operand ImmType, LMULInfo m, string Constraint = ""> {
2285  defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
2286}
2287
2288multiclass VPseudoVALU_MM<bit Commutable = 0> {
2289  foreach mti = AllMasks in {
2290    let VLMul = mti.LMul.value, isCommutable = Commutable in {
2291      def "_MM_" # mti.BX : VPseudoBinaryNoMask<VR, VR, VR, "", sewop = sew_mask>,
2292        SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mti.LMul.MX>;
2293    }
2294  }
2295}
2296
2297// We use earlyclobber here due to
2298// * The destination EEW is smaller than the source EEW and the overlap is
2299//   in the lowest-numbered part of the source register group is legal.
2300//   Otherwise, it is illegal.
2301// * The destination EEW is greater than the source EEW, the source EMUL is
2302//   at least 1, and the overlap is in the highest-numbered part of the
2303//   destination register group is legal. Otherwise, it is illegal.
2304multiclass VPseudoBinaryW_VV<LMULInfo m, bit Commutable = 0> {
2305  defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
2306                           "@earlyclobber $rd", TargetConstraintType=3,
2307                           Commutable=Commutable>;
2308}
2309
2310multiclass VPseudoBinaryW_VV_RM<LMULInfo m, int sew> {
2311  defm _VV : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
2312                                      "@earlyclobber $rd", sew, UsesVXRM=0,
2313                                      TargetConstraintType=3>;
2314}
2315
2316multiclass VPseudoBinaryW_VX<LMULInfo m> {
2317  defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
2318                             "@earlyclobber $rd", TargetConstraintType=3>;
2319}
2320
2321multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
2322  defm "_VI" : VPseudoBinary<m.wvrclass, m.vrclass, ImmType, m,
2323                             "@earlyclobber $rd", TargetConstraintType=3>;
2324}
2325
2326multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
2327  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
2328                                               f.fprclass, m,
2329                                               "@earlyclobber $rd", sew,
2330                                               UsesVXRM=0,
2331                                               TargetConstraintType=3>;
2332}
2333
2334multiclass VPseudoBinaryW_WV<LMULInfo m> {
2335  defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
2336                           "@earlyclobber $rd", TargetConstraintType=3>;
2337  defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
2338                               "@earlyclobber $rd", TargetConstraintType=3>;
2339}
2340
2341multiclass VPseudoBinaryW_WV_RM<LMULInfo m, int sew> {
2342  defm _WV : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass, m.vrclass, m,
2343                                       "@earlyclobber $rd", sew, UsesVXRM = 0,
2344                                       TargetConstraintType = 3>;
2345  defm _WV : VPseudoTiedBinaryRoundingMode<m.wvrclass, m.vrclass, m,
2346                                           "@earlyclobber $rd", sew,
2347                                           TargetConstraintType = 3>;
2348}
2349
2350multiclass VPseudoBinaryW_WX<LMULInfo m> {
2351  defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m, /*Constraint*/ "", TargetConstraintType=3>;
2352}
2353
2354multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew> {
2355  defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
2356                                               f.fprclass, m,
2357                                               Constraint="",
2358                                               sew=sew,
2359                                               UsesVXRM=0,
2360                                               TargetConstraintType=3>;
2361}
2362
2363// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
2364// if the source and destination have an LMUL<=1. This matches this overlap
2365// exception from the spec.
2366// "The destination EEW is smaller than the source EEW and the overlap is in the
2367//  lowest-numbered part of the source register group."
2368multiclass VPseudoBinaryV_WV<LMULInfo m> {
2369  defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
2370                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
2371                           TargetConstraintType=2>;
2372}
2373
2374multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
2375  defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
2376                                       !if(!ge(m.octuple, 8),
2377                                       "@earlyclobber $rd", ""),
2378                                       TargetConstraintType=2>;
2379}
2380
2381multiclass VPseudoBinaryV_WX<LMULInfo m> {
2382  defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
2383                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
2384                           TargetConstraintType=2>;
2385}
2386
2387multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
2388  defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
2389                                       !if(!ge(m.octuple, 8),
2390                                       "@earlyclobber $rd", ""),
2391                                       TargetConstraintType=2>;
2392}
2393
2394multiclass VPseudoBinaryV_WI<LMULInfo m> {
2395  defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
2396                           !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
2397                           TargetConstraintType=2>;
2398}
2399
2400multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
2401  defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
2402                                       !if(!ge(m.octuple, 8),
2403                                       "@earlyclobber $rd", ""),
2404                                       TargetConstraintType=2>;
2405}
2406
2407// For vadc and vsbc, the instruction encoding is reserved if the destination
2408// vector register is v0.
2409// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
2410multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2411                             string Constraint = "",
2412                             bit Commutable = 0,
2413                             bits<2> TargetConstraintType = 1> {
2414  let isCommutable = Commutable in
2415  def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
2416    VPseudoBinaryCarry<!if(CarryOut, VR,
2417                       !if(!and(CarryIn, !not(CarryOut)),
2418                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2419                       m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
2420}
2421
2422multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit Commutable = 0> {
2423  let isCommutable = Commutable in
2424  def "_VVM" # "_" # m.MX:
2425    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2426                             m.vrclass, m.vrclass, m>;
2427}
2428
2429multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2430                             string Constraint = "", bits<2> TargetConstraintType = 1> {
2431  def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
2432    VPseudoBinaryCarry<!if(CarryOut, VR,
2433                       !if(!and(CarryIn, !not(CarryOut)),
2434                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2435                       m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
2436}
2437
2438multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
2439  def "_VXM" # "_" # m.MX:
2440    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2441                             m.vrclass, GPR, m>;
2442}
2443
2444multiclass VPseudoVMRG_FM {
2445  foreach f = FPList in {
2446    foreach m = f.MxList in {
2447      defvar mx = m.MX;
2448      def "_V" # f.FX # "M_" # mx
2449          : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R, m.vrclass,
2450                                     f.fprclass, m>,
2451          SchedBinary<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF", mx,
2452                      forceMasked=1, forcePassthruRead=true>;
2453    }
2454  }
2455}
2456
2457multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
2458                             string Constraint = "", bits<2> TargetConstraintType = 1> {
2459  def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
2460    VPseudoBinaryCarry<!if(CarryOut, VR,
2461                       !if(!and(CarryIn, !not(CarryOut)),
2462                           GetVRegNoV0<m.vrclass>.R, m.vrclass)),
2463                       m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>;
2464}
2465
2466multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
2467  def "_VIM" # "_" # m.MX:
2468    VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
2469                             m.vrclass, simm5, m>;
2470}
2471
2472multiclass VPseudoUnaryVMV_V_X_I {
2473  foreach m = MxList in {
2474    let VLMul = m.value in {
2475      defvar mx = m.MX;
2476      let VLMul = m.value in {
2477        def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2478                         SchedUnary<"WriteVIMovV", "ReadVIMovV", mx,
2479                                    forcePassthruRead=true>;
2480        let isReMaterializable = 1 in
2481        def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
2482                         SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
2483                                    forcePassthruRead=true>;
2484        let isReMaterializable = 1 in
2485        def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
2486                         SchedNullary<"WriteVIMovI", mx,
2487                                      forcePassthruRead=true>;
2488      }
2489    }
2490  }
2491}
2492
2493multiclass VPseudoVMV_F {
2494  foreach f = FPList in {
2495    foreach m = f.MxList in {
2496      defvar mx = m.MX;
2497      let VLMul = m.value in {
2498        def "_" # f.FX # "_" # mx :
2499          VPseudoUnaryNoMask<m.vrclass, f.fprclass>,
2500          SchedUnary<"WriteVFMovV", "ReadVFMovF", mx, forcePassthruRead=true>;
2501      }
2502    }
2503  }
2504}
2505
2506multiclass VPseudoVCLS_V {
2507  foreach m = MxListF in {
2508    defvar mx = m.MX;
2509    let VLMul = m.value in {
2510      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2511                       SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
2512                                  forcePassthruRead=true>;
2513      def "_V_" # mx # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
2514                                 RISCVMaskedPseudo<MaskIdx=2>,
2515                                 SchedUnary<"WriteVFClassV", "ReadVFClassV", mx,
2516                                            forcePassthruRead=true>;
2517    }
2518  }
2519}
2520
2521multiclass VPseudoVSQR_V_RM {
2522  foreach m = MxListF in {
2523    defvar mx = m.MX;
2524    defvar sews = SchedSEWSet<m.MX, isF=1>.val;
2525
2526    let VLMul = m.value in
2527      foreach e = sews in {
2528        defvar suffix = "_" # mx # "_E" # e;
2529        let SEW = e in {
2530          def "_V" # suffix : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
2531                              SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
2532                                         forcePassthruRead=true>;
2533          def "_V" #suffix # "_MASK"
2534              : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
2535                RISCVMaskedPseudo<MaskIdx = 2>,
2536                SchedUnary<"WriteVFSqrtV", "ReadVFSqrtV", mx, e,
2537                           forcePassthruRead=true>;
2538        }
2539      }
2540  }
2541}
2542
2543multiclass VPseudoVRCP_V {
2544  foreach m = MxListF in {
2545    defvar mx = m.MX;
2546    foreach e = SchedSEWSet<mx, isF=1>.val in {
2547      let VLMul = m.value in {
2548        def "_V_" # mx # "_E" # e
2549            : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
2550              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
2551        def "_V_" # mx # "_E" # e # "_MASK"
2552            : VPseudoUnaryMask<m.vrclass, m.vrclass>,
2553              RISCVMaskedPseudo<MaskIdx = 2>,
2554              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
2555      }
2556    }
2557  }
2558}
2559
2560multiclass VPseudoVRCP_V_RM {
2561  foreach m = MxListF in {
2562    defvar mx = m.MX;
2563    foreach e = SchedSEWSet<mx, isF=1>.val in {
2564      let VLMul = m.value in {
2565        def "_V_" # mx # "_E" # e
2566            : VPseudoUnaryNoMaskRoundingMode<m.vrclass, m.vrclass>,
2567              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
2568        def "_V_" # mx # "_E" # e # "_MASK"
2569            : VPseudoUnaryMaskRoundingMode<m.vrclass, m.vrclass>,
2570              RISCVMaskedPseudo<MaskIdx = 2>,
2571              SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, e, forcePassthruRead=true>;
2572      }
2573    }
2574  }
2575}
2576
2577multiclass PseudoVEXT_VF2 {
2578  defvar constraints = "@earlyclobber $rd";
2579  foreach m = MxListVF2 in {
2580    defvar mx = m.MX;
2581    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF4"), !eq(mx, "MF2"), !eq(mx, "M1")), 1, 3);
2582    let VLMul = m.value in {
2583      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
2584                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2585      def "_" # mx # "_MASK" :
2586        VPseudoUnaryMask<m.vrclass, m.f2vrclass, constraints, CurrTypeConstraints>,
2587        RISCVMaskedPseudo<MaskIdx=2>,
2588        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2589    }
2590  }
2591}
2592
2593multiclass PseudoVEXT_VF4 {
2594  defvar constraints = "@earlyclobber $rd";
2595  foreach m = MxListVF4 in {
2596    defvar mx = m.MX;
2597    defvar CurrTypeConstraints = !if(!or(!eq(mx, "MF2"), !eq(mx, "M1"), !eq(mx, "M2")), 1, 3);
2598    let VLMul = m.value in {
2599      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
2600                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2601      def "_" # mx # "_MASK" :
2602        VPseudoUnaryMask<m.vrclass, m.f4vrclass, constraints, CurrTypeConstraints>,
2603        RISCVMaskedPseudo<MaskIdx=2>,
2604        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2605    }
2606  }
2607}
2608
2609multiclass PseudoVEXT_VF8 {
2610  defvar constraints = "@earlyclobber $rd";
2611  foreach m = MxListVF8 in {
2612    defvar mx = m.MX;
2613    defvar CurrTypeConstraints = !if(!or(!eq(mx, "M1"), !eq(mx, "M2"), !eq(mx, "M4")), 1, 3);
2614    let VLMul = m.value in {
2615      def "_" # mx : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
2616                     SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2617      def "_" # mx # "_MASK" :
2618        VPseudoUnaryMask<m.vrclass, m.f8vrclass, constraints, CurrTypeConstraints>,
2619        RISCVMaskedPseudo<MaskIdx=2>,
2620        SchedUnary<"WriteVExtV", "ReadVExtV", mx, forcePassthruRead=true>;
2621    }
2622  }
2623}
2624
2625// The destination EEW is 1 since "For the purposes of register group overlap
2626// constraints, mask elements have EEW=1."
2627// The source EEW is 8, 16, 32, or 64.
2628// When the destination EEW is different from source EEW, we need to use
2629// @earlyclobber to avoid the overlap between destination and source registers.
2630// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
2631// exception from the spec
2632// "The destination EEW is smaller than the source EEW and the overlap is in the
2633//  lowest-numbered part of the source register group".
2634// With LMUL<=1 the source and dest occupy a single register so any overlap
2635// is in the lowest-numbered part.
2636multiclass VPseudoBinaryM_VV<LMULInfo m, bits<2> TargetConstraintType = 1,
2637                             bit Commutable = 0> {
2638  defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
2639                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""),
2640                            TargetConstraintType, Commutable=Commutable>;
2641}
2642
2643multiclass VPseudoBinaryM_VX<LMULInfo m, bits<2> TargetConstraintType = 1> {
2644  defm "_VX" :
2645    VPseudoBinaryM<VR, m.vrclass, GPR, m,
2646                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2647}
2648
2649multiclass VPseudoBinaryM_VF<LMULInfo m, FPR_Info f, bits<2> TargetConstraintType = 1> {
2650  defm "_V" # f.FX :
2651    VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
2652                   !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2653}
2654
2655multiclass VPseudoBinaryM_VI<LMULInfo m, bits<2> TargetConstraintType = 1> {
2656  defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
2657                            !if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
2658}
2659
2660multiclass VPseudoVGTR_VV_VX_VI {
2661  defvar constraint = "@earlyclobber $rd";
2662  foreach m = MxList in {
2663    defvar mx = m.MX;
2664    defm "" : VPseudoBinaryV_VX<m, constraint>,
2665              SchedBinary<"WriteVRGatherVX", "ReadVRGatherVX_data",
2666                          "ReadVRGatherVX_index", mx, forcePassthruRead=true>;
2667    defm "" : VPseudoBinaryV_VI<uimm5, m, constraint>,
2668              SchedUnary<"WriteVRGatherVI", "ReadVRGatherVI_data", mx,
2669                         forcePassthruRead=true>;
2670
2671    defvar sews = SchedSEWSet<mx>.val;
2672    foreach e = sews in {
2673      defm "" : VPseudoBinaryV_VV<m, constraint, e>,
2674                SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
2675                              "ReadVRGatherVV_index", mx, e, forcePassthruRead=true>;
2676    }
2677  }
2678}
2679
2680multiclass VPseudoVSALU_VV_VX_VI<bit Commutable = 0> {
2681  foreach m = MxList in {
2682    defvar mx = m.MX;
2683    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
2684              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUX", mx,
2685                          forcePassthruRead=true>;
2686    defm "" : VPseudoBinaryV_VX<m>,
2687              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
2688                          forcePassthruRead=true>;
2689    defm "" : VPseudoBinaryV_VI<simm5, m>,
2690              SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forcePassthruRead=true>;
2691  }
2692}
2693
2694
2695multiclass VPseudoVSHT_VV_VX_VI {
2696  foreach m = MxList in {
2697    defvar mx = m.MX;
2698    defm "" : VPseudoBinaryV_VV<m>,
2699              SchedBinary<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV", mx,
2700                          forcePassthruRead=true>;
2701    defm "" : VPseudoBinaryV_VX<m>,
2702              SchedBinary<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX", mx,
2703                          forcePassthruRead=true>;
2704    defm "" : VPseudoBinaryV_VI<uimm5, m>,
2705              SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forcePassthruRead=true>;
2706  }
2707}
2708
2709multiclass VPseudoVSSHT_VV_VX_VI_RM {
2710  foreach m = MxList in {
2711    defvar mx = m.MX;
2712    defm "" : VPseudoBinaryV_VV_RM<m>,
2713              SchedBinary<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV", mx,
2714                          forcePassthruRead=true>;
2715    defm "" : VPseudoBinaryV_VX_RM<m>,
2716              SchedBinary<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX", mx,
2717                          forcePassthruRead=true>;
2718    defm "" : VPseudoBinaryV_VI_RM<uimm5, m>,
2719              SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forcePassthruRead=true>;
2720  }
2721}
2722
2723multiclass VPseudoVALU_VV_VX_VI<bit Commutable = 0> {
2724  foreach m = MxList in {
2725    defvar mx = m.MX;
2726    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
2727            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", mx,
2728                        forcePassthruRead=true>;
2729    defm "" : VPseudoBinaryV_VX<m>,
2730            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
2731                        forcePassthruRead=true>;
2732    defm "" : VPseudoBinaryV_VI<simm5, m>,
2733            SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forcePassthruRead=true>;
2734  }
2735}
2736
2737multiclass VPseudoVSALU_VV_VX {
2738  foreach m = MxList in {
2739    defvar mx = m.MX;
2740    defm "" : VPseudoBinaryV_VV<m>,
2741              SchedBinary<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV", mx,
2742                          forcePassthruRead=true>;
2743    defm "" : VPseudoBinaryV_VX<m>,
2744              SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
2745                          forcePassthruRead=true>;
2746  }
2747}
2748
2749multiclass VPseudoVSMUL_VV_VX_RM {
2750  foreach m = MxList in {
2751    defvar mx = m.MX;
2752    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=1>,
2753              SchedBinary<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV", mx,
2754                          forcePassthruRead=true>;
2755    defm "" : VPseudoBinaryV_VX_RM<m>,
2756              SchedBinary<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX", mx,
2757                          forcePassthruRead=true>;
2758  }
2759}
2760
2761multiclass VPseudoVAALU_VV_VX_RM<bit Commutable = 0> {
2762  foreach m = MxList in {
2763    defvar mx = m.MX;
2764    defm "" : VPseudoBinaryV_VV_RM<m, Commutable=Commutable>,
2765              SchedBinary<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV", mx,
2766                          forcePassthruRead=true>;
2767    defm "" : VPseudoBinaryV_VX_RM<m>,
2768              SchedBinary<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX", mx,
2769                          forcePassthruRead=true>;
2770  }
2771}
2772
2773multiclass VPseudoVMINMAX_VV_VX {
2774  foreach m = MxList in {
2775    defvar mx = m.MX;
2776    defm "" : VPseudoBinaryV_VV<m, Commutable=1>,
2777              SchedBinary<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV", mx>;
2778    defm "" : VPseudoBinaryV_VX<m>,
2779              SchedBinary<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX", mx>;
2780  }
2781}
2782
2783multiclass VPseudoVMUL_VV_VX<bit Commutable = 0> {
2784  foreach m = MxList in {
2785    defvar mx = m.MX;
2786    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
2787              SchedBinary<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV", mx>;
2788    defm "" : VPseudoBinaryV_VX<m>,
2789              SchedBinary<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX", mx>;
2790  }
2791}
2792
2793multiclass VPseudoVDIV_VV_VX {
2794  foreach m = MxList in {
2795    defvar mx = m.MX;
2796    defvar sews = SchedSEWSet<mx>.val;
2797    foreach e = sews in {
2798      defm "" : VPseudoBinaryV_VV<m, "", e>,
2799                SchedBinary<"WriteVIDivV", "ReadVIDivV", "ReadVIDivV", mx, e>;
2800      defm "" : VPseudoBinaryV_VX<m, "", e>,
2801                SchedBinary<"WriteVIDivX", "ReadVIDivV", "ReadVIDivX", mx, e>;
2802    }
2803  }
2804}
2805
2806multiclass VPseudoVFMUL_VV_VF_RM {
2807  foreach m = MxListF in {
2808    foreach e = SchedSEWSet<m.MX, isF=1>.val in
2809      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
2810                SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, e,
2811                            forcePassthruRead=true>;
2812  }
2813
2814  foreach f = FPList in {
2815    foreach m = f.MxList in {
2816      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
2817                SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
2818                            f.SEW, forcePassthruRead=true>;
2819    }
2820  }
2821}
2822
2823multiclass VPseudoVFDIV_VV_VF_RM {
2824  foreach m = MxListF in {
2825    defvar mx = m.MX;
2826    defvar sews = SchedSEWSet<mx, isF=1>.val;
2827    foreach e = sews in {
2828      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
2829                SchedBinary<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV", mx, e,
2830                            forcePassthruRead=true>;
2831    }
2832  }
2833
2834  foreach f = FPList in {
2835    foreach m = f.MxList in {
2836      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
2837                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
2838                            forcePassthruRead=true>;
2839    }
2840  }
2841}
2842
2843multiclass VPseudoVFRDIV_VF_RM {
2844  foreach f = FPList in {
2845    foreach m = f.MxList in {
2846      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
2847                SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
2848                            forcePassthruRead=true>;
2849    }
2850  }
2851}
2852
2853multiclass VPseudoVALU_VV_VX {
2854 foreach m = MxList in {
2855    defm "" : VPseudoBinaryV_VV<m>,
2856            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
2857                        forcePassthruRead=true>;
2858    defm "" : VPseudoBinaryV_VX<m>,
2859            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
2860                        forcePassthruRead=true>;
2861  }
2862}
2863
2864multiclass VPseudoVSGNJ_VV_VF {
2865  foreach m = MxListF in {
2866    foreach e = SchedSEWSet<m.MX, isF=1>.val in
2867    defm "" : VPseudoBinaryV_VV<m, sew=e>,
2868              SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
2869                          e, forcePassthruRead=true>;
2870  }
2871
2872  foreach f = FPList in {
2873    foreach m = f.MxList in {
2874      defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
2875                SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX,
2876                            f.SEW, forcePassthruRead=true>;
2877    }
2878  }
2879}
2880
2881multiclass VPseudoVMAX_VV_VF {
2882  foreach m = MxListF in {
2883    foreach e = SchedSEWSet<m.MX, isF=1>.val in
2884      defm "" : VPseudoBinaryV_VV<m, sew=e>,
2885                SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
2886                            m.MX, e, forcePassthruRead=true>;
2887  }
2888
2889  foreach f = FPList in {
2890    foreach m = f.MxList in {
2891      defm "" : VPseudoBinaryV_VF<m, f, sew=f.SEW>,
2892                SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF",
2893                            m.MX, f.SEW, forcePassthruRead=true>;
2894    }
2895  }
2896}
2897
2898multiclass VPseudoVALU_VV_VF_RM {
2899  foreach m = MxListF in {
2900    foreach e = SchedSEWSet<m.MX, isF=1>.val in
2901      defm "" : VPseudoBinaryFV_VV_RM<m, e>,
2902                SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, e,
2903                            forcePassthruRead=true>;
2904  }
2905
2906  foreach f = FPList in {
2907    foreach m = f.MxList in {
2908      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
2909                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2910                            f.SEW, forcePassthruRead=true>;
2911    }
2912  }
2913}
2914
2915multiclass VPseudoVALU_VF_RM {
2916  foreach f = FPList in {
2917    foreach m = f.MxList in {
2918      defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
2919                SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
2920                            f.SEW, forcePassthruRead=true>;
2921    }
2922  }
2923}
2924
2925multiclass VPseudoVALU_VX_VI {
2926  foreach m = MxList in {
2927    defvar mx = m.MX;
2928    defm "" : VPseudoBinaryV_VX<m>,
2929              SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
2930                          forcePassthruRead=true>;
2931    defm "" : VPseudoBinaryV_VI<simm5, m>,
2932              SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forcePassthruRead=true>;
2933  }
2934}
2935
2936multiclass VPseudoVWALU_VV_VX<bit Commutable = 0> {
2937  foreach m = MxListW in {
2938    defvar mx = m.MX;
2939    defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
2940              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
2941                          forcePassthruRead=true>;
2942    defm "" : VPseudoBinaryW_VX<m>,
2943              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
2944                          forcePassthruRead=true>;
2945  }
2946}
2947
2948multiclass VPseudoVWMUL_VV_VX<bit Commutable = 0> {
2949  foreach m = MxListW in {
2950    defvar mx = m.MX;
2951    defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
2952              SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx,
2953                          forcePassthruRead=true>;
2954    defm "" : VPseudoBinaryW_VX<m>,
2955              SchedBinary<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX", mx,
2956                          forcePassthruRead=true>;
2957  }
2958}
2959
2960multiclass VPseudoVWMUL_VV_VF_RM {
2961  foreach m = MxListFW in {
2962    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
2963    defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
2964              SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX,
2965                          e, forcePassthruRead=true>;
2966  }
2967
2968  foreach f = FPListW in {
2969    foreach m = f.MxListFW in {
2970      defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
2971                SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX,
2972                          f.SEW, forcePassthruRead=true>;
2973    }
2974  }
2975}
2976
2977multiclass VPseudoVWALU_WV_WX {
2978  foreach m = MxListW in {
2979    defvar mx = m.MX;
2980    defm "" : VPseudoBinaryW_WV<m>,
2981              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
2982                          forcePassthruRead=true>;
2983    defm "" : VPseudoBinaryW_WX<m>,
2984              SchedBinary<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX", mx,
2985                          forcePassthruRead=true>;
2986  }
2987}
2988
2989multiclass VPseudoVFWALU_VV_VF_RM {
2990  foreach m = MxListFW in {
2991    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
2992      defm "" : VPseudoBinaryW_VV_RM<m, sew=e>,
2993                SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
2994                            e, forcePassthruRead=true>;
2995  }
2996
2997  foreach f = FPListW in {
2998    foreach m = f.MxListFW in {
2999      defm "" : VPseudoBinaryW_VF_RM<m, f, sew=f.SEW>,
3000                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
3001                          f.SEW, forcePassthruRead=true>;
3002    }
3003  }
3004}
3005
3006multiclass VPseudoVFWALU_WV_WF_RM {
3007  foreach m = MxListFW in {
3008    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3009      defm "" : VPseudoBinaryW_WV_RM<m, sew=e>,
3010                SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX,
3011                            e, forcePassthruRead=true>;
3012  }
3013  foreach f = FPListW in {
3014    foreach m = f.MxListFW in {
3015      defm "" : VPseudoBinaryW_WF_RM<m, f, sew=f.SEW>,
3016                SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX,
3017                            f.SEW, forcePassthruRead=true>;
3018    }
3019  }
3020}
3021
3022multiclass VPseudoVMRG_VM_XM_IM {
3023  foreach m = MxList in {
3024    defvar mx = m.MX;
3025    def "_VVM" # "_" # m.MX:
3026      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3027                               m.vrclass, m.vrclass, m>,
3028      SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
3029                          forcePassthruRead=true>;
3030    def "_VXM" # "_" # m.MX:
3031      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3032                               m.vrclass, GPR, m>,
3033      SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
3034                          forcePassthruRead=true>;
3035    def "_VIM" # "_" # m.MX:
3036      VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
3037                               m.vrclass, simm5, m>,
3038      SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
3039                          forcePassthruRead=true>;
3040  }
3041}
3042
3043multiclass VPseudoVCALU_VM_XM_IM {
3044  foreach m = MxList in {
3045    defvar mx = m.MX;
3046    defm "" : VPseudoTiedBinaryV_VM<m, Commutable=1>,
3047              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3048                          forcePassthruRead=true>;
3049    defm "" : VPseudoTiedBinaryV_XM<m>,
3050              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3051                          forcePassthruRead=true>;
3052    defm "" : VPseudoTiedBinaryV_IM<m>,
3053              SchedUnary<"WriteVICALUI", "ReadVICALUV", mx,
3054                          forcePassthruRead=true>;
3055  }
3056}
3057
3058multiclass VPseudoVCALU_VM_XM {
3059  foreach m = MxList in {
3060    defvar mx = m.MX;
3061    defm "" : VPseudoTiedBinaryV_VM<m>,
3062              SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
3063                          forcePassthruRead=true>;
3064    defm "" : VPseudoTiedBinaryV_XM<m>,
3065              SchedBinary<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX", mx,
3066                          forcePassthruRead=true>;
3067  }
3068}
3069
3070multiclass VPseudoVCALUM_VM_XM_IM {
3071  defvar constraint = "@earlyclobber $rd";
3072  foreach m = MxList in {
3073    defvar mx = m.MX;
3074    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
3075                                Commutable=1, TargetConstraintType=2>,
3076              SchedBinary<"WriteVICALUMV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
3077                          forcePassthruRead=true>;
3078    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
3079              SchedBinary<"WriteVICALUMX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
3080                          forcePassthruRead=true>;
3081    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=1, Constraint=constraint, TargetConstraintType=2>,
3082              SchedUnary<"WriteVICALUMI", "ReadVICALUV", mx, forceMasked=1,
3083                          forcePassthruRead=true>;
3084  }
3085}
3086
3087multiclass VPseudoVCALUM_VM_XM {
3088  defvar constraint = "@earlyclobber $rd";
3089  foreach m = MxList in {
3090    defvar mx = m.MX;
3091    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
3092                                TargetConstraintType=2>,
3093              SchedBinary<"WriteVICALUMV", "ReadVICALUV", "ReadVICALUV", mx, forceMasked=1,
3094                          forcePassthruRead=true>;
3095    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=1, Constraint=constraint,
3096                                TargetConstraintType=2>,
3097              SchedBinary<"WriteVICALUMX", "ReadVICALUV", "ReadVICALUX", mx, forceMasked=1,
3098                          forcePassthruRead=true>;
3099  }
3100}
3101
3102multiclass VPseudoVCALUM_V_X_I {
3103  defvar constraint = "@earlyclobber $rd";
3104  foreach m = MxList in {
3105    defvar mx = m.MX;
3106    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint,
3107                                Commutable=1, TargetConstraintType=2>,
3108              SchedBinary<"WriteVICALUMV", "ReadVICALUV", "ReadVICALUV", mx,
3109                          forcePassthruRead=true>;
3110    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
3111              SchedBinary<"WriteVICALUMX", "ReadVICALUV", "ReadVICALUX", mx,
3112                          forcePassthruRead=true>;
3113    defm "" : VPseudoBinaryV_IM<m, CarryOut=1, CarryIn=0, Constraint=constraint>,
3114              SchedUnary<"WriteVICALUMI", "ReadVICALUV", mx,
3115                          forcePassthruRead=true>;
3116  }
3117}
3118
3119multiclass VPseudoVCALUM_V_X {
3120  defvar constraint = "@earlyclobber $rd";
3121  foreach m = MxList in {
3122    defvar mx = m.MX;
3123    defm "" : VPseudoBinaryV_VM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
3124              SchedBinary<"WriteVICALUMV", "ReadVICALUV", "ReadVICALUV", mx,
3125                          forcePassthruRead=true>;
3126    defm "" : VPseudoBinaryV_XM<m, CarryOut=1, CarryIn=0, Constraint=constraint, TargetConstraintType=2>,
3127              SchedBinary<"WriteVICALUMX", "ReadVICALUV", "ReadVICALUX", mx,
3128                          forcePassthruRead=true>;
3129  }
3130}
3131
3132multiclass VPseudoVNCLP_WV_WX_WI_RM {
3133  foreach m = MxListW in {
3134    defvar mx = m.MX;
3135    defm "" : VPseudoBinaryV_WV_RM<m>,
3136              SchedBinary<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV", mx,
3137                          forcePassthruRead=true>;
3138    defm "" : VPseudoBinaryV_WX_RM<m>,
3139              SchedBinary<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX", mx,
3140                          forcePassthruRead=true>;
3141    defm "" : VPseudoBinaryV_WI_RM<m>,
3142              SchedUnary<"WriteVNClipI", "ReadVNClipV", mx,
3143                          forcePassthruRead=true>;
3144  }
3145}
3146
3147multiclass VPseudoVNSHT_WV_WX_WI {
3148  foreach m = MxListW in {
3149    defvar mx = m.MX;
3150    defm "" : VPseudoBinaryV_WV<m>,
3151              SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx,
3152                          forcePassthruRead=true>;
3153    defm "" : VPseudoBinaryV_WX<m>,
3154              SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx,
3155                          forcePassthruRead=true>;
3156    defm "" : VPseudoBinaryV_WI<m>,
3157              SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx,
3158                          forcePassthruRead=true>;
3159  }
3160}
3161
3162multiclass VPseudoTernaryWithTailPolicy<VReg RetClass,
3163                                          RegisterClass Op1Class,
3164                                          DAGOperand Op2Class,
3165                                          LMULInfo MInfo,
3166                                          int sew> {
3167  let VLMul = MInfo.value, SEW=sew in {
3168    defvar mx = MInfo.MX;
3169    def "_" # mx # "_E" # sew : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class>;
3170    def "_" # mx # "_E" # sew # "_MASK" : VPseudoTernaryMaskPolicy<RetClass, Op1Class, Op2Class>,
3171                                          RISCVMaskedPseudo<MaskIdx=3>;
3172  }
3173}
3174
3175multiclass VPseudoTernaryWithTailPolicyRoundingMode<VReg RetClass,
3176                                                    RegisterClass Op1Class,
3177                                                    DAGOperand Op2Class,
3178                                                    LMULInfo MInfo,
3179                                                    int sew> {
3180  let VLMul = MInfo.value, SEW=sew in {
3181    defvar mx = MInfo.MX;
3182    def "_" # mx # "_E" # sew
3183        : VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
3184                                                     Op2Class>;
3185    def "_" # mx # "_E" # sew # "_MASK"
3186        : VPseudoTernaryMaskPolicyRoundingMode<RetClass, Op1Class,
3187                                               Op2Class>,
3188          RISCVMaskedPseudo<MaskIdx=3>;
3189  }
3190}
3191
3192multiclass VPseudoTernaryWithPolicy<VReg RetClass,
3193                                    RegisterClass Op1Class,
3194                                    DAGOperand Op2Class,
3195                                    LMULInfo MInfo,
3196                                    string Constraint = "",
3197                                    bit Commutable = 0,
3198                                    bits<2> TargetConstraintType = 1> {
3199  let VLMul = MInfo.value in {
3200    let isCommutable = Commutable in
3201    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>;
3202    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint, TargetConstraintType>,
3203                                   RISCVMaskedPseudo<MaskIdx=3>;
3204  }
3205}
3206
3207multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
3208                                                RegisterClass Op1Class,
3209                                                DAGOperand Op2Class,
3210                                                LMULInfo MInfo,
3211                                                string Constraint = "",
3212                                                int sew = 0,
3213                                                bit Commutable = 0,
3214                                                bits<2> TargetConstraintType = 1> {
3215  let VLMul = MInfo.value in {
3216    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
3217    let isCommutable = Commutable in
3218    def suffix :
3219        VPseudoTernaryNoMaskWithPolicyRoundingMode<RetClass, Op1Class,
3220                                                   Op2Class, Constraint,
3221                                                   TargetConstraintType>;
3222    def suffix # "_MASK" :
3223        VPseudoBinaryMaskPolicyRoundingMode<RetClass, Op1Class,
3224                                            Op2Class, Constraint,
3225                                            UsesVXRM_=0,
3226                                            TargetConstraintType=TargetConstraintType>,
3227                                   RISCVMaskedPseudo<MaskIdx=3>;
3228  }
3229}
3230
3231multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m> {
3232  defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
3233                                      Commutable=1>;
3234}
3235
3236multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, int sew> {
3237  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
3238                                                  sew=sew, Commutable=1>;
3239}
3240
3241multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m> {
3242  defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
3243                                        Commutable=1>;
3244}
3245
3246multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f,
3247                                      int sew> {
3248  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
3249                                                          m.vrclass, m,
3250                                                          sew=sew, Commutable=1>;
3251}
3252
3253multiclass VPseudoTernaryW_VV<LMULInfo m, bit Commutable = 0> {
3254  defvar constraint = "@earlyclobber $rd";
3255  defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
3256                                      constraint, Commutable=Commutable, TargetConstraintType=3>;
3257}
3258
3259multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew> {
3260  defvar constraint = "@earlyclobber $rd";
3261  defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
3262                                                  constraint, sew,
3263                                                  TargetConstraintType=3>;
3264}
3265
3266multiclass VPseudoTernaryW_VX<LMULInfo m> {
3267  defvar constraint = "@earlyclobber $rd";
3268  defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
3269                                        constraint, TargetConstraintType=3>;
3270}
3271
3272multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
3273  defvar constraint = "@earlyclobber $rd";
3274  defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
3275                                                          m.vrclass, m, constraint,
3276                                                          sew=sew,
3277                                                          TargetConstraintType=3>;
3278}
3279
3280multiclass VPseudoVSLDVWithPolicy<VReg RetClass,
3281                                  RegisterClass Op1Class,
3282                                  DAGOperand Op2Class,
3283                                  LMULInfo MInfo,
3284                                  string Constraint = ""> {
3285  let VLMul = MInfo.value in {
3286    def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
3287    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>,
3288                                   RISCVMaskedPseudo<MaskIdx=3>;
3289  }
3290}
3291
3292multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
3293  defm _VX : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
3294}
3295
3296multiclass VPseudoVSLDV_VI<LMULInfo m, string Constraint = ""> {
3297  defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, uimm5, m, Constraint>;
3298}
3299
3300multiclass VPseudoVMAC_VV_VX_AAXA {
3301  foreach m = MxList in {
3302    defvar mx = m.MX;
3303    defm "" : VPseudoTernaryV_VV_AAXA<m>,
3304              SchedTernary<"WriteVIMulAddV", "ReadVIMulAddV", "ReadVIMulAddV",
3305                           "ReadVIMulAddV", mx>;
3306    defm "" : VPseudoTernaryV_VX_AAXA<m>,
3307              SchedTernary<"WriteVIMulAddX", "ReadVIMulAddV", "ReadVIMulAddX",
3308                           "ReadVIMulAddV", mx>;
3309  }
3310}
3311
3312multiclass VPseudoVMAC_VV_VF_AAXA_RM {
3313  foreach m = MxListF in {
3314    foreach e = SchedSEWSet<m.MX, isF=1>.val in
3315      defm "" : VPseudoTernaryV_VV_AAXA_RM<m, sew=e>,
3316                SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
3317                             "ReadVFMulAddV", m.MX, e>;
3318  }
3319
3320  foreach f = FPList in {
3321    foreach m = f.MxList in {
3322      defm "" : VPseudoTernaryV_VF_AAXA_RM<m, f, sew=f.SEW>,
3323                SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
3324                             "ReadVFMulAddV", m.MX, f.SEW>;
3325    }
3326  }
3327}
3328
3329multiclass VPseudoVSLD_VX_VI<bit slidesUp = false, string Constraint = ""> {
3330  defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
3331  foreach m = MxList in {
3332    defvar mx = m.MX;
3333    defm "" : VPseudoVSLDV_VX<m, Constraint>,
3334              SchedTernary<WriteSlideX, "ReadVISlideV", "ReadVISlideV",
3335                           "ReadVISlideX", mx>;
3336    defm "" : VPseudoVSLDV_VI<m, Constraint>,
3337              SchedBinary<"WriteVSlideI", "ReadVISlideV", "ReadVISlideV", mx>;
3338  }
3339}
3340
3341multiclass VPseudoVWMAC_VV_VX<bit Commutable = 0> {
3342  foreach m = MxListW in {
3343    defvar mx = m.MX;
3344    defm "" : VPseudoTernaryW_VV<m, Commutable=Commutable>,
3345              SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV",
3346                           "ReadVIWMulAddV", mx>;
3347    defm "" : VPseudoTernaryW_VX<m>,
3348              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
3349                           "ReadVIWMulAddV", mx>;
3350  }
3351}
3352
3353multiclass VPseudoVWMAC_VX {
3354  foreach m = MxListW in {
3355    defm "" : VPseudoTernaryW_VX<m>,
3356              SchedTernary<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
3357                           "ReadVIWMulAddV", m.MX>;
3358  }
3359}
3360
3361multiclass VPseudoVWMAC_VV_VF_RM {
3362  foreach m = MxListFW in {
3363    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3364      defm "" : VPseudoTernaryW_VV_RM<m, sew=e>,
3365                SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
3366                             "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, e>;
3367  }
3368
3369  foreach f = FPListW in {
3370    foreach m = f.MxListFW in {
3371      defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
3372                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
3373                             "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>;
3374    }
3375  }
3376}
3377
3378multiclass VPseudoVWMAC_VV_VF_BF_RM {
3379  foreach m = MxListFW in {
3380    defvar mx = m.MX;
3381    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in
3382      defm "" : VPseudoTernaryW_VV_RM<m, sew=e>,
3383                SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV",
3384                             "ReadVFWMulAddV", "ReadVFWMulAddV", mx, e>;
3385  }
3386
3387  foreach f = BFPListW in {
3388    foreach m = f.MxListFW in {
3389      defvar mx = m.MX;
3390      defm "" : VPseudoTernaryW_VF_RM<m, f, sew=f.SEW>,
3391                SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV",
3392                             "ReadVFWMulAddF", "ReadVFWMulAddV", mx, f.SEW>;
3393    }
3394  }
3395}
3396
3397multiclass VPseudoVCMPM_VV_VX_VI<bit Commutable = 0> {
3398  foreach m = MxList in {
3399    defvar mx = m.MX;
3400    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2, Commutable=Commutable>,
3401              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
3402    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3403              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3404    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
3405              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
3406  }
3407}
3408
3409multiclass VPseudoVCMPM_VV_VX {
3410  foreach m = MxList in {
3411    defvar mx = m.MX;
3412    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
3413              SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
3414    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3415              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3416  }
3417}
3418
3419multiclass VPseudoVCMPM_VV_VF {
3420  foreach m = MxListF in {
3421    defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
3422              SchedBinary<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV", m.MX>;
3423  }
3424
3425  foreach f = FPList in {
3426    foreach m = f.MxList in {
3427      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
3428                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
3429    }
3430  }
3431}
3432
3433multiclass VPseudoVCMPM_VF {
3434  foreach f = FPList in {
3435    foreach m = f.MxList in {
3436      defm "" : VPseudoBinaryM_VF<m, f, TargetConstraintType=2>,
3437                SchedBinary<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF", m.MX>;
3438    }
3439  }
3440}
3441
3442multiclass VPseudoVCMPM_VX_VI {
3443  foreach m = MxList in {
3444    defvar mx = m.MX;
3445    defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
3446              SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
3447    defm "" : VPseudoBinaryM_VI<m, TargetConstraintType=2>,
3448              SchedUnary<"WriteVICmpI", "ReadVICmpV", mx>;
3449  }
3450}
3451
3452multiclass VPseudoVRED_VS {
3453  foreach m = MxList in {
3454    defvar mx = m.MX;
3455    foreach e = SchedSEWSet<mx>.val in {
3456      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3457                 SchedReduction<"WriteVIRedV_From", "ReadVIRedV", mx, e>;
3458    }
3459  }
3460}
3461
3462multiclass VPseudoVREDMINMAX_VS {
3463  foreach m = MxList in {
3464    defvar mx = m.MX;
3465    foreach e = SchedSEWSet<mx>.val in {
3466      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3467                 SchedReduction<"WriteVIRedMinMaxV_From", "ReadVIRedV", mx, e>;
3468    }
3469  }
3470}
3471
3472multiclass VPseudoVWRED_VS {
3473  foreach m = MxListWRed in {
3474    defvar mx = m.MX;
3475    foreach e = SchedSEWSet<mx, isWidening=1>.val in {
3476      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3477                 SchedReduction<"WriteVIWRedV_From", "ReadVIWRedV", mx, e>;
3478    }
3479  }
3480}
3481
3482multiclass VPseudoVFRED_VS_RM {
3483  foreach m = MxListF in {
3484    defvar mx = m.MX;
3485    foreach e = SchedSEWSet<mx, isF=1>.val in {
3486      defm _VS
3487          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3488                                                     V_M1.vrclass, m, e>,
3489            SchedReduction<"WriteVFRedV_From", "ReadVFRedV", mx, e>;
3490    }
3491  }
3492}
3493
3494multiclass VPseudoVFREDMINMAX_VS {
3495  foreach m = MxListF in {
3496    defvar mx = m.MX;
3497    foreach e = SchedSEWSet<mx, isF=1>.val in {
3498      defm _VS : VPseudoTernaryWithTailPolicy<V_M1.vrclass, m.vrclass, V_M1.vrclass, m, e>,
3499                 SchedReduction<"WriteVFRedMinMaxV_From", "ReadVFRedV", mx, e>;
3500    }
3501  }
3502}
3503
3504multiclass VPseudoVFREDO_VS_RM {
3505  foreach m = MxListF in {
3506    defvar mx = m.MX;
3507    foreach e = SchedSEWSet<mx, isF=1>.val in {
3508      defm _VS : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3509                                                          V_M1.vrclass, m, e>,
3510                 SchedReduction<"WriteVFRedOV_From", "ReadVFRedOV", mx, e>;
3511    }
3512  }
3513}
3514
3515multiclass VPseudoVFWRED_VS_RM {
3516  foreach m = MxListFWRed in {
3517    defvar mx = m.MX;
3518    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
3519      defm _VS
3520          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3521                                                     V_M1.vrclass, m, e>,
3522            SchedReduction<"WriteVFWRedV_From", "ReadVFWRedV", mx, e>;
3523    }
3524  }
3525}
3526
3527multiclass VPseudoVFWREDO_VS_RM {
3528  foreach m = MxListFWRed in {
3529    defvar mx = m.MX;
3530    foreach e = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
3531      defm _VS
3532          : VPseudoTernaryWithTailPolicyRoundingMode<V_M1.vrclass, m.vrclass,
3533                                                     V_M1.vrclass, m, e>,
3534            SchedReduction<"WriteVFWRedOV_From", "ReadVFWRedV", mx, e>;
3535    }
3536  }
3537}
3538
3539multiclass VPseudoConversion<VReg RetClass,
3540                             VReg Op1Class,
3541                             LMULInfo MInfo,
3542                             string Constraint = "",
3543                             int sew = 0,
3544                             bits<2> TargetConstraintType = 1> {
3545  defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
3546  let VLMul = MInfo.value, SEW=sew in {
3547    def suffix : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint, TargetConstraintType>;
3548    def suffix # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
3549                                            Constraint, TargetConstraintType>,
3550                           RISCVMaskedPseudo<MaskIdx=2>;
3551  }
3552}
3553
3554multiclass VPseudoConversionRoundingMode<VReg RetClass,
3555                             VReg Op1Class,
3556                             LMULInfo MInfo,
3557                             string Constraint = "",
3558                             int sew = 0,
3559                             bits<2> TargetConstraintType = 1> {
3560  let VLMul = MInfo.value, SEW=sew in {
3561    defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
3562    def suffix : VPseudoUnaryNoMaskRoundingMode<RetClass, Op1Class, Constraint, TargetConstraintType>;
3563    def suffix # "_MASK" : VPseudoUnaryMaskRoundingMode<RetClass, Op1Class,
3564                                                        Constraint,
3565                                                        TargetConstraintType>,
3566                           RISCVMaskedPseudo<MaskIdx=2>;
3567  }
3568}
3569
3570multiclass VPseudoConversionNoExcept<VReg RetClass,
3571                                     VReg Op1Class,
3572                                     LMULInfo MInfo,
3573                                     string Constraint = ""> {
3574  let VLMul = MInfo.value in {
3575    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask_NoExcept<RetClass, Op1Class, Constraint>;
3576  }
3577}
3578
3579multiclass VPseudoVCVTI_V {
3580  foreach m = MxListF in {
3581    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>,
3582              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3583                         forcePassthruRead=true>;
3584  }
3585}
3586
3587multiclass VPseudoVCVTI_V_RM {
3588  foreach m = MxListF in {
3589    defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m>,
3590              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3591                         forcePassthruRead=true>;
3592  }
3593}
3594
3595multiclass VPseudoVFROUND_NOEXCEPT_V {
3596  foreach m = MxListF in {
3597    defm _V : VPseudoConversionNoExcept<m.vrclass, m.vrclass, m>,
3598              SchedUnary<"WriteVFCvtFToIV", "ReadVFCvtFToIV", m.MX,
3599                         forcePassthruRead=true>;
3600  }
3601}
3602
3603multiclass VPseudoVCVTF_V_RM {
3604  foreach m = MxListF in {
3605    foreach e = SchedSEWSet<m.MX, isF=1>.val in
3606      defm _V : VPseudoConversionRoundingMode<m.vrclass, m.vrclass, m, sew=e>,
3607                SchedUnary<"WriteVFCvtIToFV", "ReadVFCvtIToFV", m.MX, e,
3608                           forcePassthruRead=true>;
3609  }
3610}
3611
3612multiclass VPseudoVWCVTI_V {
3613  defvar constraint = "@earlyclobber $rd";
3614  foreach m = MxListFW in {
3615    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3616              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
3617                         forcePassthruRead=true>;
3618  }
3619}
3620
3621multiclass VPseudoVWCVTI_V_RM {
3622  defvar constraint = "@earlyclobber $rd";
3623  foreach m = MxListFW in {
3624    defm _V : VPseudoConversionRoundingMode<m.wvrclass, m.vrclass, m, constraint, TargetConstraintType=3>,
3625              SchedUnary<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV", m.MX,
3626                         forcePassthruRead=true>;
3627  }
3628}
3629
3630multiclass VPseudoVWCVTF_V {
3631  defvar constraint = "@earlyclobber $rd";
3632  foreach m = MxListW in {
3633    foreach e = SchedSEWSet<m.MX, isF=0, isWidening=1>.val in
3634      defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
3635                                  TargetConstraintType=3>,
3636                SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, e,
3637                           forcePassthruRead=true>;
3638  }
3639}
3640
3641multiclass VPseudoVWCVTD_V {
3642  defvar constraint = "@earlyclobber $rd";
3643  foreach m = MxListFW in {
3644    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3645      defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=e,
3646                                  TargetConstraintType=3>,
3647                SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, e,
3648                           forcePassthruRead=true>;
3649  }
3650}
3651
3652multiclass VPseudoVNCVTI_W {
3653  defvar constraint = "@earlyclobber $rd";
3654  foreach m = MxListW in {
3655    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3656              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
3657                         forcePassthruRead=true>;
3658  }
3659}
3660
3661multiclass VPseudoVNCVTI_W_RM {
3662  defvar constraint = "@earlyclobber $rd";
3663  foreach m = MxListW in {
3664    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m, constraint, TargetConstraintType=2>,
3665              SchedUnary<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV", m.MX,
3666                         forcePassthruRead=true>;
3667  }
3668}
3669
3670multiclass VPseudoVNCVTF_W_RM {
3671  defvar constraint = "@earlyclobber $rd";
3672  foreach m = MxListFW in {
3673    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3674      defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
3675                                              constraint, sew=e,
3676                                              TargetConstraintType=2>,
3677                SchedUnary<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV", m.MX, e,
3678                           forcePassthruRead=true>;
3679  }
3680}
3681
3682multiclass VPseudoVNCVTD_W {
3683  defvar constraint = "@earlyclobber $rd";
3684  foreach m = MxListFW in {
3685    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3686      defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint, sew=e,
3687                                  TargetConstraintType=2>,
3688                SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
3689                           forcePassthruRead=true>;
3690  }
3691}
3692
3693multiclass VPseudoVNCVTD_W_RM {
3694  defvar constraint = "@earlyclobber $rd";
3695  foreach m = MxListFW in {
3696    foreach e = SchedSEWSet<m.MX, isF=1, isWidening=1>.val in
3697      defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
3698                                              constraint, sew=e,
3699                                              TargetConstraintType=2>,
3700                SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, e,
3701                           forcePassthruRead=true>;
3702  }
3703}
3704
3705multiclass VPseudoUSSegLoad {
3706  foreach eew = EEWList in {
3707    foreach lmul = MxSet<eew>.m in {
3708      defvar LInfo = lmul.MX;
3709      let VLMul = lmul.value, SEW=eew in {
3710        foreach nf = NFSet<lmul>.L in {
3711          defvar vreg = SegRegClass<lmul, nf>.RC;
3712          def nf # "E" # eew # "_V_" # LInfo :
3713            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
3714          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
3715            VPseudoUSSegLoadMask<vreg, eew, nf>, RISCVMaskedPseudo<MaskIdx=2>,
3716            VLSEGSched<nf, eew, LInfo>;
3717        }
3718      }
3719    }
3720  }
3721}
3722
3723multiclass VPseudoUSSegLoadFF {
3724  foreach eew = EEWList in {
3725    foreach lmul = MxSet<eew>.m in {
3726      defvar LInfo = lmul.MX;
3727      let VLMul = lmul.value, SEW=eew in {
3728        foreach nf = NFSet<lmul>.L in {
3729          defvar vreg = SegRegClass<lmul, nf>.RC;
3730          def nf # "E" # eew # "FF_V_" # LInfo :
3731            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
3732          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
3733            VPseudoUSSegLoadFFMask<vreg, eew, nf>, RISCVMaskedPseudo<MaskIdx=2>,
3734            VLSEGFFSched<nf, eew, LInfo>;
3735        }
3736      }
3737    }
3738  }
3739}
3740
3741multiclass VPseudoSSegLoad {
3742  foreach eew = EEWList in {
3743    foreach lmul = MxSet<eew>.m in {
3744      defvar LInfo = lmul.MX;
3745      let VLMul = lmul.value, SEW=eew in {
3746        foreach nf = NFSet<lmul>.L in {
3747          defvar vreg = SegRegClass<lmul, nf>.RC;
3748          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
3749                                               VLSSEGSched<nf, eew, LInfo>;
3750          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
3751                                                         RISCVMaskedPseudo<MaskIdx=3>,
3752                                                         VLSSEGSched<nf, eew, LInfo>;
3753        }
3754      }
3755    }
3756  }
3757}
3758
3759multiclass VPseudoISegLoad<bit Ordered> {
3760  foreach idxEEW = EEWList in {
3761    foreach dataEEW = EEWList in {
3762      foreach dataEMUL = MxSet<dataEEW>.m in {
3763        defvar dataEMULOctuple = dataEMUL.octuple;
3764        // Calculate emul = eew * lmul / sew
3765        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
3766        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
3767          defvar DataLInfo = dataEMUL.MX;
3768          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
3769          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
3770          defvar DataVreg = dataEMUL.vrclass;
3771          defvar IdxVreg = idxEMUL.vrclass;
3772          let VLMul = dataEMUL.value in {
3773            foreach nf = NFSet<dataEMUL>.L in {
3774              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
3775              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
3776                VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3777                                      nf, Ordered>,
3778                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
3779              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
3780                VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3781                                    nf, Ordered>,
3782                RISCVMaskedPseudo<MaskIdx=3>,
3783                VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
3784            }
3785          }
3786        }
3787      }
3788    }
3789  }
3790}
3791
3792multiclass VPseudoUSSegStore {
3793  foreach eew = EEWList in {
3794    foreach lmul = MxSet<eew>.m in {
3795      defvar LInfo = lmul.MX;
3796      let VLMul = lmul.value, SEW=eew in {
3797        foreach nf = NFSet<lmul>.L in {
3798          defvar vreg = SegRegClass<lmul, nf>.RC;
3799          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
3800                                               VSSEGSched<nf, eew, LInfo>;
3801          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
3802                                                         RISCVMaskedPseudo<MaskIdx=2>,
3803                                                         VSSEGSched<nf, eew, LInfo>;
3804        }
3805      }
3806    }
3807  }
3808}
3809
3810multiclass VPseudoSSegStore {
3811  foreach eew = EEWList in {
3812    foreach lmul = MxSet<eew>.m in {
3813      defvar LInfo = lmul.MX;
3814      let VLMul = lmul.value, SEW=eew in {
3815        foreach nf = NFSet<lmul>.L in {
3816          defvar vreg = SegRegClass<lmul, nf>.RC;
3817          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
3818                                               VSSSEGSched<nf, eew, LInfo>;
3819          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
3820                                                         RISCVMaskedPseudo<MaskIdx=3>,
3821                                                         VSSSEGSched<nf, eew, LInfo>;
3822        }
3823      }
3824    }
3825  }
3826}
3827
3828multiclass VPseudoISegStore<bit Ordered> {
3829  foreach idxEEW = EEWList in {
3830    foreach dataEEW = EEWList in {
3831      foreach dataEMUL = MxSet<dataEEW>.m in {
3832        defvar dataEMULOctuple = dataEMUL.octuple;
3833        // Calculate emul = eew * lmul / sew
3834        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), !logtwo(dataEEW));
3835        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
3836          defvar DataLInfo = dataEMUL.MX;
3837          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
3838          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
3839          defvar DataVreg = dataEMUL.vrclass;
3840          defvar IdxVreg = idxEMUL.vrclass;
3841          let VLMul = dataEMUL.value in {
3842            foreach nf = NFSet<dataEMUL>.L in {
3843              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
3844              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
3845                VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3846                                       nf, Ordered>,
3847                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
3848              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
3849                VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
3850                                     nf, Ordered>,
3851                RISCVMaskedPseudo<MaskIdx=3>,
3852                VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
3853            }
3854          }
3855        }
3856      }
3857    }
3858  }
3859}
3860
3861//===----------------------------------------------------------------------===//
3862// Helpers to define the intrinsic patterns.
3863//===----------------------------------------------------------------------===//
3864
3865class VPatUnaryNoMask<string intrinsic_name,
3866                      string inst,
3867                      string kind,
3868                      ValueType result_type,
3869                      ValueType op2_type,
3870                      int log2sew,
3871                      LMULInfo vlmul,
3872                      VReg result_reg_class,
3873                      VReg op2_reg_class,
3874                      bit isSEWAware = 0> :
3875  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
3876                   (result_type result_reg_class:$passthru),
3877                   (op2_type op2_reg_class:$rs2),
3878                   VLOpFrag)),
3879                   (!cast<Instruction>(
3880                     !if(isSEWAware,
3881                         inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
3882                         inst#"_"#kind#"_"#vlmul.MX))
3883                   (result_type result_reg_class:$passthru),
3884                   (op2_type op2_reg_class:$rs2),
3885                   GPR:$vl, log2sew, TU_MU)>;
3886
3887class VPatUnaryNoMaskRoundingMode<string intrinsic_name,
3888                                  string inst,
3889                                  string kind,
3890                                  ValueType result_type,
3891                                  ValueType op2_type,
3892                                  int log2sew,
3893                                  LMULInfo vlmul,
3894                                  VReg result_reg_class,
3895                                  VReg op2_reg_class,
3896                                  bit isSEWAware = 0> :
3897  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
3898                   (result_type result_reg_class:$passthru),
3899                   (op2_type op2_reg_class:$rs2),
3900                   (XLenVT timm:$round),
3901                   VLOpFrag)),
3902                   (!cast<Instruction>(
3903                      !if(isSEWAware,
3904                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
3905                          inst#"_"#kind#"_"#vlmul.MX))
3906                   (result_type result_reg_class:$passthru),
3907                   (op2_type op2_reg_class:$rs2),
3908                   (XLenVT timm:$round),
3909                   GPR:$vl, log2sew, TU_MU)>;
3910
3911class VPatUnaryNoMaskRTZ<string intrinsic_name,
3912                         string inst,
3913                         string kind,
3914                         ValueType result_type,
3915                         ValueType op2_type,
3916                         int log2sew,
3917                         LMULInfo vlmul,
3918                         VReg result_reg_class,
3919                         VReg op2_reg_class,
3920                         bit isSEWAware = 0> :
3921  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
3922                   (result_type result_reg_class:$passthru),
3923                   (op2_type op2_reg_class:$rs2),
3924                   (XLenVT 0b001),
3925                   VLOpFrag)),
3926                   (!cast<Instruction>(
3927                      !if(isSEWAware,
3928                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
3929                          inst#"_"#kind#"_"#vlmul.MX))
3930                   (result_type result_reg_class:$passthru),
3931                   (op2_type op2_reg_class:$rs2),
3932                   GPR:$vl, log2sew, TU_MU)>;
3933
3934class VPatUnaryMask<string intrinsic_name,
3935                    string inst,
3936                    string kind,
3937                    ValueType result_type,
3938                    ValueType op2_type,
3939                    ValueType mask_type,
3940                    int log2sew,
3941                    LMULInfo vlmul,
3942                    VReg result_reg_class,
3943                    VReg op2_reg_class,
3944                    bit isSEWAware = 0> :
3945  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
3946                   (result_type result_reg_class:$passthru),
3947                   (op2_type op2_reg_class:$rs2),
3948                   (mask_type V0),
3949                   VLOpFrag, (XLenVT timm:$policy))),
3950                   (!cast<Instruction>(
3951                      !if(isSEWAware,
3952                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
3953                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
3954                   (result_type result_reg_class:$passthru),
3955                   (op2_type op2_reg_class:$rs2),
3956                   (mask_type V0), GPR:$vl, log2sew, (XLenVT timm:$policy))>;
3957
3958class VPatUnaryMaskRoundingMode<string intrinsic_name,
3959                                string inst,
3960                                string kind,
3961                                ValueType result_type,
3962                                ValueType op2_type,
3963                                ValueType mask_type,
3964                                int log2sew,
3965                                LMULInfo vlmul,
3966                                VReg result_reg_class,
3967                                VReg op2_reg_class,
3968                                bit isSEWAware = 0> :
3969  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
3970                   (result_type result_reg_class:$passthru),
3971                   (op2_type op2_reg_class:$rs2),
3972                   (mask_type V0),
3973                   (XLenVT timm:$round),
3974                   VLOpFrag, (XLenVT timm:$policy))),
3975                   (!cast<Instruction>(
3976                      !if(isSEWAware,
3977                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
3978                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
3979                   (result_type result_reg_class:$passthru),
3980                   (op2_type op2_reg_class:$rs2),
3981                   (mask_type V0),
3982                   (XLenVT timm:$round),
3983                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
3984
3985class VPatUnaryMaskRTZ<string intrinsic_name,
3986                       string inst,
3987                       string kind,
3988                       ValueType result_type,
3989                       ValueType op2_type,
3990                       ValueType mask_type,
3991                       int log2sew,
3992                       LMULInfo vlmul,
3993                       VReg result_reg_class,
3994                       VReg op2_reg_class,
3995                       bit isSEWAware = 0> :
3996  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
3997                   (result_type result_reg_class:$passthru),
3998                   (op2_type op2_reg_class:$rs2),
3999                   (mask_type V0),
4000                   (XLenVT 0b001),
4001                   VLOpFrag, (XLenVT timm:$policy))),
4002                   (!cast<Instruction>(
4003                      !if(isSEWAware,
4004                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
4005                          inst#"_"#kind#"_"#vlmul.MX#"_MASK"))
4006                   (result_type result_reg_class:$passthru),
4007                   (op2_type op2_reg_class:$rs2),
4008                   (mask_type V0),
4009                   GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4010
4011class VPatMaskUnaryNoMask<string intrinsic_name,
4012                          string inst,
4013                          MTypeInfo mti> :
4014  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
4015                (mti.Mask VR:$rs2),
4016                VLOpFrag)),
4017                (!cast<Instruction>(inst#"_M_"#mti.BX)
4018                (mti.Mask VR:$rs2),
4019                GPR:$vl, mti.Log2SEW)>;
4020
4021class VPatMaskUnaryMask<string intrinsic_name,
4022                        string inst,
4023                        MTypeInfo mti> :
4024  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
4025                (mti.Mask VR:$passthru),
4026                (mti.Mask VR:$rs2),
4027                (mti.Mask V0),
4028                VLOpFrag)),
4029                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
4030                (mti.Mask VR:$passthru),
4031                (mti.Mask VR:$rs2),
4032                (mti.Mask V0), GPR:$vl, mti.Log2SEW, TU_MU)>;
4033
4034class VPatUnaryAnyMask<string intrinsic,
4035                       string inst,
4036                       string kind,
4037                       ValueType result_type,
4038                       ValueType op1_type,
4039                       ValueType mask_type,
4040                       int log2sew,
4041                       LMULInfo vlmul,
4042                       VReg result_reg_class,
4043                       VReg op1_reg_class> :
4044  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4045                   (result_type result_reg_class:$passthru),
4046                   (op1_type op1_reg_class:$rs1),
4047                   (mask_type VR:$rs2),
4048                   VLOpFrag)),
4049                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4050                   (result_type result_reg_class:$passthru),
4051                   (op1_type op1_reg_class:$rs1),
4052                   (mask_type VR:$rs2),
4053                   GPR:$vl, log2sew)>;
4054
4055class VPatBinaryM<string intrinsic_name,
4056                  string inst,
4057                  ValueType result_type,
4058                  ValueType op1_type,
4059                  ValueType op2_type,
4060                  int sew,
4061                  VReg op1_reg_class,
4062                  DAGOperand op2_kind> :
4063  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4064                   (op1_type op1_reg_class:$rs1),
4065                   (op2_type op2_kind:$rs2),
4066                   VLOpFrag)),
4067                   (!cast<Instruction>(inst)
4068                   (op1_type op1_reg_class:$rs1),
4069                   (op2_type op2_kind:$rs2),
4070                   GPR:$vl, sew)>;
4071
4072class VPatBinaryNoMaskTU<string intrinsic_name,
4073                         string inst,
4074                         ValueType result_type,
4075                         ValueType op1_type,
4076                         ValueType op2_type,
4077                         int sew,
4078                         VReg result_reg_class,
4079                         VReg op1_reg_class,
4080                         DAGOperand op2_kind> :
4081  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4082                   (result_type result_reg_class:$passthru),
4083                   (op1_type op1_reg_class:$rs1),
4084                   (op2_type op2_kind:$rs2),
4085                   VLOpFrag)),
4086                   (!cast<Instruction>(inst)
4087                   (result_type result_reg_class:$passthru),
4088                   (op1_type op1_reg_class:$rs1),
4089                   (op2_type op2_kind:$rs2),
4090                   GPR:$vl, sew, TU_MU)>;
4091
4092class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
4093                                     string inst,
4094                                     ValueType result_type,
4095                                     ValueType op1_type,
4096                                     ValueType op2_type,
4097                                     int sew,
4098                                     VReg result_reg_class,
4099                                     VReg op1_reg_class,
4100                                     DAGOperand op2_kind> :
4101  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4102                   (result_type result_reg_class:$passthru),
4103                   (op1_type op1_reg_class:$rs1),
4104                   (op2_type op2_kind:$rs2),
4105                   (XLenVT timm:$round),
4106                   VLOpFrag)),
4107                   (!cast<Instruction>(inst)
4108                   (result_type result_reg_class:$passthru),
4109                   (op1_type op1_reg_class:$rs1),
4110                   (op2_type op2_kind:$rs2),
4111                   (XLenVT timm:$round),
4112                   GPR:$vl, sew, TU_MU)>;
4113
4114
4115// Same as VPatBinaryM but source operands are swapped.
4116class VPatBinaryMSwapped<string intrinsic_name,
4117                         string inst,
4118                         ValueType result_type,
4119                         ValueType op1_type,
4120                         ValueType op2_type,
4121                         int sew,
4122                         VReg op1_reg_class,
4123                         DAGOperand op2_kind> :
4124  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4125                   (op2_type op2_kind:$rs2),
4126                   (op1_type op1_reg_class:$rs1),
4127                   VLOpFrag)),
4128                   (!cast<Instruction>(inst)
4129                   (op1_type op1_reg_class:$rs1),
4130                   (op2_type op2_kind:$rs2),
4131                   GPR:$vl, sew)>;
4132
4133class VPatBinaryMask<string intrinsic_name,
4134                     string inst,
4135                     ValueType result_type,
4136                     ValueType op1_type,
4137                     ValueType op2_type,
4138                     ValueType mask_type,
4139                     int sew,
4140                     VReg result_reg_class,
4141                     VReg op1_reg_class,
4142                     DAGOperand op2_kind> :
4143  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4144                   (result_type result_reg_class:$passthru),
4145                   (op1_type op1_reg_class:$rs1),
4146                   (op2_type op2_kind:$rs2),
4147                   (mask_type V0),
4148                   VLOpFrag)),
4149                   (!cast<Instruction>(inst#"_MASK")
4150                   (result_type result_reg_class:$passthru),
4151                   (op1_type op1_reg_class:$rs1),
4152                   (op2_type op2_kind:$rs2),
4153                   (mask_type V0), GPR:$vl, sew)>;
4154
4155class VPatBinaryMaskPolicy<string intrinsic_name,
4156                           string inst,
4157                           ValueType result_type,
4158                           ValueType op1_type,
4159                           ValueType op2_type,
4160                           ValueType mask_type,
4161                           int sew,
4162                           VReg result_reg_class,
4163                           VReg op1_reg_class,
4164                           DAGOperand op2_kind> :
4165  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4166                   (result_type result_reg_class:$passthru),
4167                   (op1_type op1_reg_class:$rs1),
4168                   (op2_type op2_kind:$rs2),
4169                   (mask_type V0),
4170                   VLOpFrag, (XLenVT timm:$policy))),
4171                   (!cast<Instruction>(inst#"_MASK")
4172                   (result_type result_reg_class:$passthru),
4173                   (op1_type op1_reg_class:$rs1),
4174                   (op2_type op2_kind:$rs2),
4175                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4176
4177class VPatBinaryMaskPolicyRoundingMode<string intrinsic_name,
4178                                       string inst,
4179                                       ValueType result_type,
4180                                       ValueType op1_type,
4181                                       ValueType op2_type,
4182                                       ValueType mask_type,
4183                                       int sew,
4184                                       VReg result_reg_class,
4185                                       VReg op1_reg_class,
4186                                       DAGOperand op2_kind> :
4187  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4188                   (result_type result_reg_class:$passthru),
4189                   (op1_type op1_reg_class:$rs1),
4190                   (op2_type op2_kind:$rs2),
4191                   (mask_type V0),
4192                   (XLenVT timm:$round),
4193                   VLOpFrag, (XLenVT timm:$policy))),
4194                   (!cast<Instruction>(inst#"_MASK")
4195                   (result_type result_reg_class:$passthru),
4196                   (op1_type op1_reg_class:$rs1),
4197                   (op2_type op2_kind:$rs2),
4198                   (mask_type V0),
4199                   (XLenVT timm:$round),
4200                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4201
4202// Same as VPatBinaryMask but source operands are swapped.
4203class VPatBinaryMaskSwapped<string intrinsic_name,
4204                            string inst,
4205                            ValueType result_type,
4206                            ValueType op1_type,
4207                            ValueType op2_type,
4208                            ValueType mask_type,
4209                            int sew,
4210                            VReg result_reg_class,
4211                            VReg op1_reg_class,
4212                            DAGOperand op2_kind> :
4213  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4214                   (result_type result_reg_class:$passthru),
4215                   (op2_type op2_kind:$rs2),
4216                   (op1_type op1_reg_class:$rs1),
4217                   (mask_type V0),
4218                   VLOpFrag)),
4219                   (!cast<Instruction>(inst#"_MASK")
4220                   (result_type result_reg_class:$passthru),
4221                   (op1_type op1_reg_class:$rs1),
4222                   (op2_type op2_kind:$rs2),
4223                   (mask_type V0), GPR:$vl, sew)>;
4224
4225class VPatTiedBinaryNoMask<string intrinsic_name,
4226                           string inst,
4227                           ValueType result_type,
4228                           ValueType op2_type,
4229                           int sew,
4230                           VReg result_reg_class,
4231                           DAGOperand op2_kind> :
4232  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4233                   (result_type (undef)),
4234                   (result_type result_reg_class:$rs1),
4235                   (op2_type op2_kind:$rs2),
4236                   VLOpFrag)),
4237                   (!cast<Instruction>(inst#"_TIED")
4238                   (result_type result_reg_class:$rs1),
4239                   (op2_type op2_kind:$rs2),
4240                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4241
4242class VPatTiedBinaryNoMaskRoundingMode<string intrinsic_name,
4243                                       string inst,
4244                                       ValueType result_type,
4245                                       ValueType op2_type,
4246                                       int sew,
4247                                       VReg result_reg_class,
4248                                       DAGOperand op2_kind> :
4249  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4250                   (result_type (undef)),
4251                   (result_type result_reg_class:$rs1),
4252                   (op2_type op2_kind:$rs2),
4253                   (XLenVT timm:$round),
4254                   VLOpFrag)),
4255                   (!cast<Instruction>(inst#"_TIED")
4256                   (result_type result_reg_class:$rs1),
4257                   (op2_type op2_kind:$rs2),
4258                   (XLenVT timm:$round),
4259                   GPR:$vl, sew, TAIL_AGNOSTIC)>;
4260
4261class VPatTiedBinaryNoMaskTU<string intrinsic_name,
4262                             string inst,
4263                             ValueType result_type,
4264                             ValueType op2_type,
4265                             int sew,
4266                             VReg result_reg_class,
4267                             DAGOperand op2_kind> :
4268  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4269                   (result_type result_reg_class:$passthru),
4270                   (result_type result_reg_class:$passthru),
4271                   (op2_type op2_kind:$rs2),
4272                   VLOpFrag)),
4273                   (!cast<Instruction>(inst#"_TIED")
4274                   (result_type result_reg_class:$passthru),
4275                   (op2_type op2_kind:$rs2),
4276                   GPR:$vl, sew, TU_MU)>;
4277
4278class VPatTiedBinaryNoMaskTURoundingMode<string intrinsic_name,
4279                                         string inst,
4280                                         ValueType result_type,
4281                                         ValueType op2_type,
4282                                         int sew,
4283                                         VReg result_reg_class,
4284                                         DAGOperand op2_kind> :
4285  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
4286                   (result_type result_reg_class:$passthru),
4287                   (result_type result_reg_class:$passthru),
4288                   (op2_type op2_kind:$rs2),
4289                   (XLenVT timm:$round),
4290                   VLOpFrag)),
4291                   (!cast<Instruction>(inst#"_TIED")
4292                   (result_type result_reg_class:$passthru),
4293                   (op2_type op2_kind:$rs2),
4294                   (XLenVT timm:$round),
4295                   GPR:$vl, sew, TU_MU)>;
4296
4297class VPatTiedBinaryMask<string intrinsic_name,
4298                         string inst,
4299                         ValueType result_type,
4300                         ValueType op2_type,
4301                         ValueType mask_type,
4302                         int sew,
4303                         VReg result_reg_class,
4304                         DAGOperand op2_kind> :
4305  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4306                   (result_type result_reg_class:$passthru),
4307                   (result_type result_reg_class:$passthru),
4308                   (op2_type op2_kind:$rs2),
4309                   (mask_type V0),
4310                   VLOpFrag, (XLenVT timm:$policy))),
4311                   (!cast<Instruction>(inst#"_MASK_TIED")
4312                   (result_type result_reg_class:$passthru),
4313                   (op2_type op2_kind:$rs2),
4314                   (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
4315
4316class VPatTiedBinaryMaskRoundingMode<string intrinsic_name,
4317                                     string inst,
4318                                     ValueType result_type,
4319                                     ValueType op2_type,
4320                                     ValueType mask_type,
4321                                     int sew,
4322                                     VReg result_reg_class,
4323                                     DAGOperand op2_kind> :
4324  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
4325                   (result_type result_reg_class:$passthru),
4326                   (result_type result_reg_class:$passthru),
4327                   (op2_type op2_kind:$rs2),
4328                   (mask_type V0),
4329                   (XLenVT timm:$round),
4330                   VLOpFrag, (XLenVT timm:$policy))),
4331                   (!cast<Instruction>(inst#"_MASK_TIED")
4332                   (result_type result_reg_class:$passthru),
4333                   (op2_type op2_kind:$rs2),
4334                   (mask_type V0),
4335                   (XLenVT timm:$round),
4336                   GPR:$vl, sew, (XLenVT timm:$policy))>;
4337
4338class VPatTernaryNoMaskTU<string intrinsic,
4339                          string inst,
4340                          string kind,
4341                          ValueType result_type,
4342                          ValueType op1_type,
4343                          ValueType op2_type,
4344                          int log2sew,
4345                          LMULInfo vlmul,
4346                          VReg result_reg_class,
4347                          RegisterClass op1_reg_class,
4348                          DAGOperand op2_kind> :
4349  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4350                    (result_type result_reg_class:$rs3),
4351                    (op1_type op1_reg_class:$rs1),
4352                    (op2_type op2_kind:$rs2),
4353                    VLOpFrag)),
4354                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4355                    result_reg_class:$rs3,
4356                    (op1_type op1_reg_class:$rs1),
4357                    op2_kind:$rs2,
4358                    GPR:$vl, log2sew, TU_MU)>;
4359
4360class VPatTernaryNoMaskTURoundingMode<string intrinsic,
4361                                      string inst,
4362                                      string kind,
4363                                      ValueType result_type,
4364                                      ValueType op1_type,
4365                                      ValueType op2_type,
4366                                      int log2sew,
4367                                      LMULInfo vlmul,
4368                                      VReg result_reg_class,
4369                                      RegisterClass op1_reg_class,
4370                                      DAGOperand op2_kind> :
4371  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4372                    (result_type result_reg_class:$rs3),
4373                    (op1_type op1_reg_class:$rs1),
4374                    (op2_type op2_kind:$rs2),
4375                    (XLenVT timm:$round),
4376                    VLOpFrag)),
4377                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew))
4378                    result_reg_class:$rs3,
4379                    (op1_type op1_reg_class:$rs1),
4380                    op2_kind:$rs2,
4381                    (XLenVT timm:$round),
4382                    GPR:$vl, log2sew, TU_MU)>;
4383
4384class VPatTernaryNoMaskWithPolicy<string intrinsic,
4385                                  string inst,
4386                                  string kind,
4387                                  ValueType result_type,
4388                                  ValueType op1_type,
4389                                  ValueType op2_type,
4390                                  int log2sew,
4391                                  LMULInfo vlmul,
4392                                  VReg result_reg_class,
4393                                  RegisterClass op1_reg_class,
4394                                  DAGOperand op2_kind,
4395                                  bit isSEWAware = false> :
4396  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4397                    (result_type result_reg_class:$rs3),
4398                    (op1_type op1_reg_class:$rs1),
4399                    (op2_type op2_kind:$rs2),
4400                    VLOpFrag, (XLenVT timm:$policy))),
4401                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#!if(isSEWAware, "_E"#!shl(1, log2sew), ""))
4402                    result_reg_class:$rs3,
4403                    (op1_type op1_reg_class:$rs1),
4404                    op2_kind:$rs2,
4405                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4406
4407class VPatTernaryNoMaskWithPolicyRoundingMode<string intrinsic,
4408                                  string inst,
4409                                  string kind,
4410                                  ValueType result_type,
4411                                  ValueType op1_type,
4412                                  ValueType op2_type,
4413                                  int log2sew,
4414                                  LMULInfo vlmul,
4415                                  VReg result_reg_class,
4416                                  RegisterClass op1_reg_class,
4417                                  DAGOperand op2_kind,
4418                                  bit isSEWAware = 0> :
4419  Pat<(result_type (!cast<Intrinsic>(intrinsic)
4420                    (result_type result_reg_class:$rs3),
4421                    (op1_type op1_reg_class:$rs1),
4422                    (op2_type op2_kind:$rs2),
4423                    (XLenVT timm:$round),
4424                    VLOpFrag, (XLenVT timm:$policy))),
4425                   (!cast<Instruction>(!if(isSEWAware,
4426                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
4427                          inst#"_"#kind#"_"#vlmul.MX))
4428                    result_reg_class:$rs3,
4429                    (op1_type op1_reg_class:$rs1),
4430                    op2_kind:$rs2,
4431                    (XLenVT timm:$round),
4432                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4433
4434class VPatTernaryMaskPolicy<string intrinsic,
4435                            string inst,
4436                            string kind,
4437                            ValueType result_type,
4438                            ValueType op1_type,
4439                            ValueType op2_type,
4440                            ValueType mask_type,
4441                            int sew,
4442                            LMULInfo vlmul,
4443                            VReg result_reg_class,
4444                            RegisterClass op1_reg_class,
4445                            DAGOperand op2_kind> :
4446  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4447                    (result_type result_reg_class:$rs3),
4448                    (op1_type op1_reg_class:$rs1),
4449                    (op2_type op2_kind:$rs2),
4450                    (mask_type V0),
4451                    VLOpFrag, (XLenVT timm:$policy))),
4452                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
4453                    result_reg_class:$rs3,
4454                    (op1_type op1_reg_class:$rs1),
4455                    op2_kind:$rs2,
4456                    (mask_type V0),
4457                    GPR:$vl, sew, (XLenVT timm:$policy))>;
4458
4459class VPatTernaryMaskPolicyRoundingMode<string intrinsic,
4460                                        string inst,
4461                                        string kind,
4462                                        ValueType result_type,
4463                                        ValueType op1_type,
4464                                        ValueType op2_type,
4465                                        ValueType mask_type,
4466                                        int log2sew,
4467                                        LMULInfo vlmul,
4468                                        VReg result_reg_class,
4469                                        RegisterClass op1_reg_class,
4470                                        DAGOperand op2_kind,
4471                                        bit isSEWAware = 0> :
4472  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4473                    (result_type result_reg_class:$rs3),
4474                    (op1_type op1_reg_class:$rs1),
4475                    (op2_type op2_kind:$rs2),
4476                    (mask_type V0),
4477                    (XLenVT timm:$round),
4478                    VLOpFrag, (XLenVT timm:$policy))),
4479                   (!cast<Instruction>(!if(isSEWAware,
4480                          inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew) # "_MASK",
4481                          inst#"_"#kind#"_"#vlmul.MX # "_MASK"))
4482                    result_reg_class:$rs3,
4483                    (op1_type op1_reg_class:$rs1),
4484                    op2_kind:$rs2,
4485                    (mask_type V0),
4486                    (XLenVT timm:$round),
4487                    GPR:$vl, log2sew, (XLenVT timm:$policy))>;
4488
4489class VPatTernaryMaskTU<string intrinsic,
4490                        string inst,
4491                        string kind,
4492                        ValueType result_type,
4493                        ValueType op1_type,
4494                        ValueType op2_type,
4495                        ValueType mask_type,
4496                        int log2sew,
4497                        LMULInfo vlmul,
4498                        VReg result_reg_class,
4499                        RegisterClass op1_reg_class,
4500                        DAGOperand op2_kind> :
4501  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4502                    (result_type result_reg_class:$rs3),
4503                    (op1_type op1_reg_class:$rs1),
4504                    (op2_type op2_kind:$rs2),
4505                    (mask_type V0),
4506                    VLOpFrag)),
4507                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
4508                    result_reg_class:$rs3,
4509                    (op1_type op1_reg_class:$rs1),
4510                    op2_kind:$rs2,
4511                    (mask_type V0),
4512                    GPR:$vl, log2sew, TU_MU)>;
4513
4514class VPatTernaryMaskTURoundingMode<string intrinsic,
4515                                    string inst,
4516                                    string kind,
4517                                    ValueType result_type,
4518                                    ValueType op1_type,
4519                                    ValueType op2_type,
4520                                    ValueType mask_type,
4521                                    int log2sew,
4522                                    LMULInfo vlmul,
4523                                    VReg result_reg_class,
4524                                    RegisterClass op1_reg_class,
4525                                    DAGOperand op2_kind> :
4526  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
4527                    (result_type result_reg_class:$rs3),
4528                    (op1_type op1_reg_class:$rs1),
4529                    (op2_type op2_kind:$rs2),
4530                    (mask_type V0),
4531                    (XLenVT timm:$round),
4532                    VLOpFrag)),
4533                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_E"#!shl(1, log2sew)# "_MASK")
4534                    result_reg_class:$rs3,
4535                    (op1_type op1_reg_class:$rs1),
4536                    op2_kind:$rs2,
4537                    (mask_type V0),
4538                    (XLenVT timm:$round),
4539                    GPR:$vl, log2sew, TU_MU)>;
4540
4541multiclass VPatUnaryS_M<string intrinsic_name,
4542                             string inst> {
4543  foreach mti = AllMasks in {
4544    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
4545                      (mti.Mask VR:$rs1), VLOpFrag)),
4546                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
4547                      GPR:$vl, mti.Log2SEW)>;
4548    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
4549                      (mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
4550                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
4551                      (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
4552  }
4553}
4554
4555multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
4556                                list<VTypeInfo> vtilist> {
4557  foreach vti = vtilist in {
4558    let Predicates = GetVTypePredicates<vti>.Predicates in
4559    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
4560                           vti.Vector, vti.Vector, vti.Mask,
4561                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
4562  }
4563}
4564
4565multiclass VPatUnaryM_M<string intrinsic,
4566                         string inst> {
4567  foreach mti = AllMasks in {
4568    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
4569    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
4570  }
4571}
4572
4573multiclass VPatUnaryV_M<string intrinsic, string instruction> {
4574  foreach vti = AllIntegerVectors in {
4575    let Predicates = GetVTypePredicates<vti>.Predicates in {
4576      def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
4577                            vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
4578      def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
4579                          vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
4580    }
4581  }
4582}
4583
4584multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
4585                         list<VTypeInfoToFraction> fractionList> {
4586  foreach vtiTofti = fractionList in {
4587      defvar vti = vtiTofti.Vti;
4588      defvar fti = vtiTofti.Fti;
4589      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
4590                                   GetVTypePredicates<fti>.Predicates) in {
4591        def : VPatUnaryNoMask<intrinsic, instruction, suffix,
4592                              vti.Vector, fti.Vector,
4593                              vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
4594        def : VPatUnaryMask<intrinsic, instruction, suffix,
4595                            vti.Vector, fti.Vector, vti.Mask,
4596                            vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
4597      }
4598  }
4599}
4600
4601multiclass VPatUnaryV_V<string intrinsic, string instruction,
4602                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4603  foreach vti = vtilist in {
4604    let Predicates = GetVTypePredicates<vti>.Predicates in {
4605      def : VPatUnaryNoMask<intrinsic, instruction, "V",
4606                            vti.Vector, vti.Vector, vti.Log2SEW,
4607                            vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4608      def : VPatUnaryMask<intrinsic, instruction, "V",
4609                          vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
4610                          vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4611    }
4612  }
4613}
4614
4615multiclass VPatUnaryV_V_RM<string intrinsic, string instruction,
4616                        list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4617  foreach vti = vtilist in {
4618    let Predicates = GetVTypePredicates<vti>.Predicates in {
4619      def : VPatUnaryNoMaskRoundingMode<intrinsic, instruction, "V",
4620                                        vti.Vector, vti.Vector, vti.Log2SEW,
4621                                        vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4622      def : VPatUnaryMaskRoundingMode<intrinsic, instruction, "V",
4623                                      vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
4624                                      vti.LMul, vti.RegClass, vti.RegClass, isSEWAware>;
4625    }
4626  }
4627}
4628
4629multiclass VPatNullaryV<string intrinsic, string instruction> {
4630  foreach vti = AllIntegerVectors in {
4631    let Predicates = GetVTypePredicates<vti>.Predicates in {
4632      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
4633                            (vti.Vector vti.RegClass:$passthru),
4634                            VLOpFrag)),
4635                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
4636                            vti.RegClass:$passthru, GPR:$vl, vti.Log2SEW, TU_MU)>;
4637      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
4638                            (vti.Vector vti.RegClass:$passthru),
4639                            (vti.Mask V0), VLOpFrag, (XLenVT timm:$policy))),
4640                            (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
4641                            vti.RegClass:$passthru, (vti.Mask V0),
4642                            GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
4643  }
4644  }
4645}
4646
4647multiclass VPatNullaryM<string intrinsic, string inst> {
4648  foreach mti = AllMasks in
4649    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
4650                        VLOpFrag)),
4651                        (!cast<Instruction>(inst#"_M_"#mti.BX)
4652                        GPR:$vl, mti.Log2SEW)>;
4653}
4654
4655multiclass VPatBinaryM<string intrinsic,
4656                      string inst,
4657                      ValueType result_type,
4658                      ValueType op1_type,
4659                      ValueType op2_type,
4660                      ValueType mask_type,
4661                      int sew,
4662                      VReg result_reg_class,
4663                      VReg op1_reg_class,
4664                      DAGOperand op2_kind> {
4665  def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
4666                    sew, op1_reg_class, op2_kind>;
4667  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
4668                       mask_type, sew, result_reg_class, op1_reg_class,
4669                       op2_kind>;
4670}
4671
4672multiclass VPatBinary<string intrinsic,
4673                      string inst,
4674                      ValueType result_type,
4675                      ValueType op1_type,
4676                      ValueType op2_type,
4677                      ValueType mask_type,
4678                      int sew,
4679                      VReg result_reg_class,
4680                      VReg op1_reg_class,
4681                      DAGOperand op2_kind> {
4682  def : VPatBinaryNoMaskTU<intrinsic, inst, result_type, op1_type, op2_type,
4683                           sew, result_reg_class, op1_reg_class, op2_kind>;
4684  def : VPatBinaryMaskPolicy<intrinsic, inst, result_type, op1_type, op2_type,
4685                             mask_type, sew, result_reg_class, op1_reg_class,
4686                             op2_kind>;
4687}
4688
4689multiclass VPatBinaryRoundingMode<string intrinsic,
4690                                  string inst,
4691                                  ValueType result_type,
4692                                  ValueType op1_type,
4693                                  ValueType op2_type,
4694                                  ValueType mask_type,
4695                                  int sew,
4696                                  VReg result_reg_class,
4697                                  VReg op1_reg_class,
4698                                  DAGOperand op2_kind> {
4699  def : VPatBinaryNoMaskTURoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
4700                                       sew, result_reg_class, op1_reg_class, op2_kind>;
4701  def : VPatBinaryMaskPolicyRoundingMode<intrinsic, inst, result_type, op1_type, op2_type,
4702                                         mask_type, sew, result_reg_class, op1_reg_class,
4703                                         op2_kind>;
4704}
4705
4706multiclass VPatBinaryMSwapped<string intrinsic,
4707                              string inst,
4708                              ValueType result_type,
4709                              ValueType op1_type,
4710                              ValueType op2_type,
4711                              ValueType mask_type,
4712                              int sew,
4713                              VReg result_reg_class,
4714                              VReg op1_reg_class,
4715                              DAGOperand op2_kind> {
4716  def : VPatBinaryMSwapped<intrinsic, inst, result_type, op1_type, op2_type,
4717                           sew, op1_reg_class, op2_kind>;
4718  def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
4719                              mask_type, sew, result_reg_class, op1_reg_class,
4720                              op2_kind>;
4721}
4722
4723multiclass VPatBinaryCarryInTAIL<string intrinsic,
4724                                 string inst,
4725                                 string kind,
4726                                 ValueType result_type,
4727                                 ValueType op1_type,
4728                                 ValueType op2_type,
4729                                 ValueType mask_type,
4730                                 int sew,
4731                                 LMULInfo vlmul,
4732                                 VReg result_reg_class,
4733                                 VReg op1_reg_class,
4734                                 DAGOperand op2_kind> {
4735  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4736                         (result_type result_reg_class:$passthru),
4737                         (op1_type op1_reg_class:$rs1),
4738                         (op2_type op2_kind:$rs2),
4739                         (mask_type V0),
4740                         VLOpFrag)),
4741                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4742                         (result_type result_reg_class:$passthru),
4743                         (op1_type op1_reg_class:$rs1),
4744                         (op2_type op2_kind:$rs2),
4745                         (mask_type V0), GPR:$vl, sew)>;
4746}
4747
4748multiclass VPatBinaryCarryIn<string intrinsic,
4749                             string inst,
4750                             string kind,
4751                             ValueType result_type,
4752                             ValueType op1_type,
4753                             ValueType op2_type,
4754                             ValueType mask_type,
4755                             int sew,
4756                             LMULInfo vlmul,
4757                             VReg op1_reg_class,
4758                             DAGOperand op2_kind> {
4759  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4760                         (op1_type op1_reg_class:$rs1),
4761                         (op2_type op2_kind:$rs2),
4762                         (mask_type V0),
4763                         VLOpFrag)),
4764                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4765                         (op1_type op1_reg_class:$rs1),
4766                         (op2_type op2_kind:$rs2),
4767                         (mask_type V0), GPR:$vl, sew)>;
4768}
4769
4770multiclass VPatBinaryMaskOut<string intrinsic,
4771                             string inst,
4772                             string kind,
4773                             ValueType result_type,
4774                             ValueType op1_type,
4775                             ValueType op2_type,
4776                             int sew,
4777                             LMULInfo vlmul,
4778                             VReg op1_reg_class,
4779                             DAGOperand op2_kind> {
4780  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
4781                         (op1_type op1_reg_class:$rs1),
4782                         (op2_type op2_kind:$rs2),
4783                         VLOpFrag)),
4784                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
4785                         (op1_type op1_reg_class:$rs1),
4786                         (op2_type op2_kind:$rs2),
4787                         GPR:$vl, sew)>;
4788}
4789
4790multiclass VPatConversion<string intrinsic,
4791                          string inst,
4792                          string kind,
4793                          ValueType result_type,
4794                          ValueType op1_type,
4795                          ValueType mask_type,
4796                          int log2sew,
4797                          LMULInfo vlmul,
4798                          VReg result_reg_class,
4799                          VReg op1_reg_class,
4800                          bit isSEWAware = 0> {
4801  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
4802                        log2sew, vlmul, result_reg_class, op1_reg_class,
4803                        isSEWAware>;
4804  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
4805                      mask_type, log2sew, vlmul, result_reg_class, op1_reg_class,
4806                      isSEWAware>;
4807}
4808
4809multiclass VPatConversionRoundingMode<string intrinsic,
4810                                      string inst,
4811                                      string kind,
4812                                      ValueType result_type,
4813                                      ValueType op1_type,
4814                                      ValueType mask_type,
4815                                      int log2sew,
4816                                      LMULInfo vlmul,
4817                                      VReg result_reg_class,
4818                                      VReg op1_reg_class,
4819                                      bit isSEWAware = 0> {
4820  def : VPatUnaryNoMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
4821                                    log2sew, vlmul, result_reg_class,
4822                                    op1_reg_class, isSEWAware>;
4823  def : VPatUnaryMaskRoundingMode<intrinsic, inst, kind, result_type, op1_type,
4824                                  mask_type, log2sew, vlmul, result_reg_class,
4825                                  op1_reg_class, isSEWAware>;
4826}
4827
4828multiclass VPatConversionRTZ<string intrinsic,
4829                             string inst,
4830                             string kind,
4831                             ValueType result_type,
4832                             ValueType op1_type,
4833                             ValueType mask_type,
4834                             int log2sew,
4835                             LMULInfo vlmul,
4836                             VReg result_reg_class,
4837                             VReg op1_reg_class,
4838                             bit isSEWAware = 0> {
4839  def : VPatUnaryNoMaskRTZ<intrinsic, inst, kind, result_type, op1_type,
4840                                    log2sew, vlmul, result_reg_class,
4841                                    op1_reg_class, isSEWAware>;
4842  def : VPatUnaryMaskRTZ<intrinsic, inst, kind, result_type, op1_type,
4843                                  mask_type, log2sew, vlmul, result_reg_class,
4844                                  op1_reg_class, isSEWAware>;
4845}
4846
4847multiclass VPatBinaryV_VV<string intrinsic, string instruction,
4848                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4849  foreach vti = vtilist in
4850    let Predicates = GetVTypePredicates<vti>.Predicates in
4851    defm : VPatBinary<intrinsic,
4852                      !if(isSEWAware,
4853                          instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4854                          instruction # "_VV_" # vti.LMul.MX),
4855                      vti.Vector, vti.Vector, vti.Vector,vti.Mask,
4856                      vti.Log2SEW, vti.RegClass,
4857                      vti.RegClass, vti.RegClass>;
4858}
4859
4860multiclass VPatBinaryV_VV_RM<string intrinsic, string instruction,
4861                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4862  foreach vti = vtilist in
4863    let Predicates = GetVTypePredicates<vti>.Predicates in
4864    defm : VPatBinaryRoundingMode<intrinsic,
4865                                  !if(isSEWAware,
4866                                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4867                                      instruction # "_VV_" # vti.LMul.MX),
4868                                  vti.Vector, vti.Vector, vti.Vector,vti.Mask,
4869                                  vti.Log2SEW, vti.RegClass,
4870                                  vti.RegClass, vti.RegClass>;
4871}
4872
4873multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
4874                              list<VTypeInfo> vtilist> {
4875  foreach vti = vtilist in {
4876    defvar ivti = GetIntVTypeInfo<vti>.Vti;
4877    let Predicates = GetVTypePredicates<vti>.Predicates in
4878    defm : VPatBinary<intrinsic,
4879                      instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW,
4880                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
4881                      vti.Log2SEW, vti.RegClass,
4882                      vti.RegClass, vti.RegClass>;
4883  }
4884}
4885
4886multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
4887                                  int eew, list<VTypeInfo> vtilist> {
4888  foreach vti = vtilist in {
4889    // emul = lmul * eew / sew
4890    defvar vlmul = vti.LMul;
4891    defvar octuple_lmul = vlmul.octuple;
4892    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
4893    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
4894      defvar emul_str = octuple_to_str<octuple_emul>.ret;
4895      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
4896      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str;
4897      let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
4898                                   GetVTypePredicates<ivti>.Predicates) in
4899      defm : VPatBinary<intrinsic, inst,
4900                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
4901                        vti.Log2SEW, vti.RegClass,
4902                        vti.RegClass, ivti.RegClass>;
4903    }
4904  }
4905}
4906
4907multiclass VPatBinaryV_VX<string intrinsic, string instruction,
4908                          list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4909  foreach vti = vtilist in {
4910    defvar kind = "V"#vti.ScalarSuffix;
4911    let Predicates = GetVTypePredicates<vti>.Predicates in
4912    defm : VPatBinary<intrinsic,
4913                      !if(isSEWAware,
4914                          instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
4915                          instruction#"_"#kind#"_"#vti.LMul.MX),
4916                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
4917                      vti.Log2SEW, vti.RegClass,
4918                      vti.RegClass, vti.ScalarRegClass>;
4919  }
4920}
4921
4922multiclass VPatBinaryV_VX_RM<string intrinsic, string instruction,
4923                             list<VTypeInfo> vtilist, bit isSEWAware = 0> {
4924  foreach vti = vtilist in {
4925    defvar kind = "V"#vti.ScalarSuffix;
4926    let Predicates = GetVTypePredicates<vti>.Predicates in
4927    defm : VPatBinaryRoundingMode<intrinsic,
4928                                  !if(isSEWAware,
4929                                      instruction#"_"#kind#"_"#vti.LMul.MX#"_E"#vti.SEW,
4930                                      instruction#"_"#kind#"_"#vti.LMul.MX),
4931                                  vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
4932                                  vti.Log2SEW, vti.RegClass,
4933                                  vti.RegClass, vti.ScalarRegClass>;
4934  }
4935}
4936
4937multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
4938                          list<VTypeInfo> vtilist> {
4939  foreach vti = vtilist in
4940    let Predicates = GetVTypePredicates<vti>.Predicates in
4941    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
4942                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
4943                      vti.Log2SEW, vti.RegClass,
4944                      vti.RegClass, GPR>;
4945}
4946
4947multiclass VPatBinaryV_VI<string intrinsic, string instruction,
4948                          list<VTypeInfo> vtilist, Operand imm_type> {
4949  foreach vti = vtilist in
4950    let Predicates = GetVTypePredicates<vti>.Predicates in
4951    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
4952                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
4953                      vti.Log2SEW, vti.RegClass,
4954                      vti.RegClass, imm_type>;
4955}
4956
4957multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
4958                             list<VTypeInfo> vtilist,
4959                             Operand imm_type> {
4960  foreach vti = vtilist in
4961    let Predicates = GetVTypePredicates<vti>.Predicates in
4962    defm : VPatBinaryRoundingMode<intrinsic,
4963                                  instruction # "_VI_" # vti.LMul.MX,
4964                                  vti.Vector, vti.Vector, XLenVT, vti.Mask,
4965                                  vti.Log2SEW, vti.RegClass,
4966                                  vti.RegClass, imm_type>;
4967}
4968
4969multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
4970  foreach mti = AllMasks in
4971    let Predicates = [HasVInstructions] in
4972    def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.BX,
4973                      mti.Mask, mti.Mask, mti.Mask,
4974                      mti.Log2SEW, VR, VR>;
4975}
4976
4977multiclass VPatBinaryW_VV<string intrinsic, string instruction,
4978                          list<VTypeInfoToWide> vtilist> {
4979  foreach VtiToWti = vtilist in {
4980    defvar Vti = VtiToWti.Vti;
4981    defvar Wti = VtiToWti.Wti;
4982    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
4983                                 GetVTypePredicates<Wti>.Predicates) in
4984    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
4985                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
4986                      Vti.Log2SEW, Wti.RegClass,
4987                      Vti.RegClass, Vti.RegClass>;
4988  }
4989}
4990
4991multiclass VPatBinaryW_VV_RM<string intrinsic, string instruction,
4992                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
4993  foreach VtiToWti = vtilist in {
4994    defvar Vti = VtiToWti.Vti;
4995    defvar Wti = VtiToWti.Wti;
4996    defvar name = !if(isSEWAware,
4997                      instruction # "_VV_" # Vti.LMul.MX # "_E" # Vti.SEW,
4998                      instruction # "_VV_" # Vti.LMul.MX);
4999    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5000                                 GetVTypePredicates<Wti>.Predicates) in
5001    defm : VPatBinaryRoundingMode<intrinsic, name,
5002                                  Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
5003                                  Vti.Log2SEW, Wti.RegClass,
5004                                  Vti.RegClass, Vti.RegClass>;
5005  }
5006}
5007
5008multiclass VPatBinaryW_VX<string intrinsic, string instruction,
5009                          list<VTypeInfoToWide> vtilist> {
5010  foreach VtiToWti = vtilist in {
5011    defvar Vti = VtiToWti.Vti;
5012    defvar Wti = VtiToWti.Wti;
5013    defvar kind = "V"#Vti.ScalarSuffix;
5014    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5015                                 GetVTypePredicates<Wti>.Predicates) in
5016    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5017                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
5018                      Vti.Log2SEW, Wti.RegClass,
5019                      Vti.RegClass, Vti.ScalarRegClass>;
5020  }
5021}
5022
5023multiclass VPatBinaryW_VX_RM<string intrinsic, string instruction,
5024                          list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
5025  foreach VtiToWti = vtilist in {
5026    defvar Vti = VtiToWti.Vti;
5027    defvar Wti = VtiToWti.Wti;
5028    defvar kind = "V"#Vti.ScalarSuffix;
5029    defvar name = !if(isSEWAware,
5030                      instruction#"_"#kind#"_"#Vti.LMul.MX # "_E" # Vti.SEW,
5031                      instruction#"_"#kind#"_"#Vti.LMul.MX);
5032    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5033                                 GetVTypePredicates<Wti>.Predicates) in
5034    defm : VPatBinaryRoundingMode<intrinsic, name,
5035                                  Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
5036                                  Vti.Log2SEW, Wti.RegClass,
5037                                  Vti.RegClass, Vti.ScalarRegClass>;
5038  }
5039}
5040
5041multiclass VPatBinaryW_WV<string intrinsic, string instruction,
5042                          list<VTypeInfoToWide> vtilist> {
5043  foreach VtiToWti = vtilist in {
5044    defvar Vti = VtiToWti.Vti;
5045    defvar Wti = VtiToWti.Wti;
5046    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5047                                 GetVTypePredicates<Wti>.Predicates) in {
5048      def : VPatTiedBinaryNoMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5049                                 Wti.Vector, Vti.Vector,
5050                                 Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5051      def : VPatBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5052                               Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
5053                               Wti.RegClass, Wti.RegClass, Vti.RegClass>;
5054      let AddedComplexity = 1 in {
5055      def : VPatTiedBinaryNoMaskTU<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5056                                   Wti.Vector, Vti.Vector,
5057                                   Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5058      def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5059                               Wti.Vector, Vti.Vector, Vti.Mask,
5060                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5061      }
5062      def : VPatBinaryMaskPolicy<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5063                                 Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5064                                 Vti.Log2SEW, Wti.RegClass,
5065                                 Wti.RegClass, Vti.RegClass>;
5066    }
5067  }
5068}
5069
5070multiclass VPatBinaryW_WV_RM<string intrinsic, string instruction,
5071                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
5072  foreach VtiToWti = vtilist in {
5073    defvar Vti = VtiToWti.Vti;
5074    defvar Wti = VtiToWti.Wti;
5075    defvar name = !if(isSEWAware,
5076                      instruction # "_WV_" # Vti.LMul.MX # "_E" # Vti.SEW,
5077                      instruction # "_WV_" # Vti.LMul.MX);
5078    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5079                                 GetVTypePredicates<Wti>.Predicates) in {
5080      def : VPatTiedBinaryNoMaskRoundingMode<intrinsic, name,
5081                                             Wti.Vector, Vti.Vector,
5082                                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5083      def : VPatBinaryNoMaskTURoundingMode<intrinsic, name,
5084                                           Wti.Vector, Wti.Vector, Vti.Vector, Vti.Log2SEW,
5085                                           Wti.RegClass, Wti.RegClass, Vti.RegClass>;
5086      let AddedComplexity = 1 in {
5087      def : VPatTiedBinaryNoMaskTURoundingMode<intrinsic, name,
5088                                               Wti.Vector, Vti.Vector,
5089                                               Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5090      def : VPatTiedBinaryMaskRoundingMode<intrinsic, name,
5091                                           Wti.Vector, Vti.Vector, Vti.Mask,
5092                                           Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
5093      }
5094      def : VPatBinaryMaskPolicyRoundingMode<intrinsic, name,
5095                                             Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5096                                             Vti.Log2SEW, Wti.RegClass,
5097                                             Wti.RegClass, Vti.RegClass>;
5098    }
5099  }
5100}
5101
5102multiclass VPatBinaryW_WX<string intrinsic, string instruction,
5103                          list<VTypeInfoToWide> vtilist> {
5104  foreach VtiToWti = vtilist in {
5105    defvar Vti = VtiToWti.Vti;
5106    defvar Wti = VtiToWti.Wti;
5107    defvar kind = "W"#Vti.ScalarSuffix;
5108    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5109                                 GetVTypePredicates<Wti>.Predicates) in
5110    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5111                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5112                      Vti.Log2SEW, Wti.RegClass,
5113                      Wti.RegClass, Vti.ScalarRegClass>;
5114  }
5115}
5116
5117multiclass VPatBinaryW_WX_RM<string intrinsic, string instruction,
5118                             list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
5119  foreach VtiToWti = vtilist in {
5120    defvar Vti = VtiToWti.Vti;
5121    defvar Wti = VtiToWti.Wti;
5122    defvar kind = "W"#Vti.ScalarSuffix;
5123    defvar name = !if(isSEWAware,
5124                      instruction#"_"#kind#"_"#Vti.LMul.MX#"_E"#Vti.SEW,
5125                      instruction#"_"#kind#"_"#Vti.LMul.MX);
5126    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5127                                 GetVTypePredicates<Wti>.Predicates) in
5128    defm : VPatBinaryRoundingMode<intrinsic, name,
5129                                  Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5130                                  Vti.Log2SEW, Wti.RegClass,
5131                                  Wti.RegClass, Vti.ScalarRegClass>;
5132  }
5133}
5134
5135multiclass VPatBinaryV_WV<string intrinsic, string instruction,
5136                          list<VTypeInfoToWide> vtilist> {
5137  foreach VtiToWti = vtilist in {
5138    defvar Vti = VtiToWti.Vti;
5139    defvar Wti = VtiToWti.Wti;
5140    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5141                                 GetVTypePredicates<Wti>.Predicates) in
5142    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
5143                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5144                      Vti.Log2SEW, Vti.RegClass,
5145                      Wti.RegClass, Vti.RegClass>;
5146  }
5147}
5148
5149multiclass VPatBinaryV_WV_RM<string intrinsic, string instruction,
5150                             list<VTypeInfoToWide> vtilist> {
5151  foreach VtiToWti = vtilist in {
5152    defvar Vti = VtiToWti.Vti;
5153    defvar Wti = VtiToWti.Wti;
5154    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5155                                 GetVTypePredicates<Wti>.Predicates) in
5156    defm : VPatBinaryRoundingMode<intrinsic,
5157                                  instruction # "_WV_" # Vti.LMul.MX,
5158                                  Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
5159                                  Vti.Log2SEW, Vti.RegClass,
5160                                  Wti.RegClass, Vti.RegClass>;
5161  }
5162}
5163
5164multiclass VPatBinaryV_WX<string intrinsic, string instruction,
5165                          list<VTypeInfoToWide> vtilist> {
5166  foreach VtiToWti = vtilist in {
5167    defvar Vti = VtiToWti.Vti;
5168    defvar Wti = VtiToWti.Wti;
5169    defvar kind = "W"#Vti.ScalarSuffix;
5170    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5171                                 GetVTypePredicates<Wti>.Predicates) in
5172    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
5173                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5174                      Vti.Log2SEW, Vti.RegClass,
5175                      Wti.RegClass, Vti.ScalarRegClass>;
5176  }
5177}
5178
5179multiclass VPatBinaryV_WX_RM<string intrinsic, string instruction,
5180                             list<VTypeInfoToWide> vtilist> {
5181  foreach VtiToWti = vtilist in {
5182    defvar Vti = VtiToWti.Vti;
5183    defvar Wti = VtiToWti.Wti;
5184    defvar kind = "W"#Vti.ScalarSuffix;
5185    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5186                                 GetVTypePredicates<Wti>.Predicates) in
5187    defm : VPatBinaryRoundingMode<intrinsic,
5188                                  instruction#"_"#kind#"_"#Vti.LMul.MX,
5189                                  Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
5190                                  Vti.Log2SEW, Vti.RegClass,
5191                                  Wti.RegClass, Vti.ScalarRegClass>;
5192  }
5193}
5194
5195
5196multiclass VPatBinaryV_WI<string intrinsic, string instruction,
5197                          list<VTypeInfoToWide> vtilist> {
5198  foreach VtiToWti = vtilist in {
5199    defvar Vti = VtiToWti.Vti;
5200    defvar Wti = VtiToWti.Wti;
5201    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5202                                 GetVTypePredicates<Wti>.Predicates) in
5203    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
5204                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
5205                      Vti.Log2SEW, Vti.RegClass,
5206                      Wti.RegClass, uimm5>;
5207  }
5208}
5209
5210multiclass VPatBinaryV_WI_RM<string intrinsic, string instruction,
5211                             list<VTypeInfoToWide> vtilist> {
5212  foreach VtiToWti = vtilist in {
5213    defvar Vti = VtiToWti.Vti;
5214    defvar Wti = VtiToWti.Wti;
5215    let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
5216                                 GetVTypePredicates<Wti>.Predicates) in
5217    defm : VPatBinaryRoundingMode<intrinsic,
5218                                  instruction # "_WI_" # Vti.LMul.MX,
5219                                  Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
5220                                  Vti.Log2SEW, Vti.RegClass,
5221                                  Wti.RegClass, uimm5>;
5222  }
5223}
5224
5225multiclass VPatBinaryV_VM<string intrinsic, string instruction,
5226                          bit CarryOut = 0,
5227                          list<VTypeInfo> vtilist = AllIntegerVectors> {
5228  foreach vti = vtilist in
5229    let Predicates = GetVTypePredicates<vti>.Predicates in
5230    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
5231                             !if(CarryOut, vti.Mask, vti.Vector),
5232                             vti.Vector, vti.Vector, vti.Mask,
5233                             vti.Log2SEW, vti.LMul,
5234                             vti.RegClass, vti.RegClass>;
5235}
5236
5237multiclass VPatBinaryV_XM<string intrinsic, string instruction,
5238                          bit CarryOut = 0,
5239                          list<VTypeInfo> vtilist = AllIntegerVectors> {
5240  foreach vti = vtilist in
5241    let Predicates = GetVTypePredicates<vti>.Predicates in
5242    defm : VPatBinaryCarryIn<intrinsic, instruction,
5243                             "V"#vti.ScalarSuffix#"M",
5244                             !if(CarryOut, vti.Mask, vti.Vector),
5245                             vti.Vector, vti.Scalar, vti.Mask,
5246                             vti.Log2SEW, vti.LMul,
5247                             vti.RegClass, vti.ScalarRegClass>;
5248}
5249
5250multiclass VPatBinaryV_IM<string intrinsic, string instruction,
5251                          bit CarryOut = 0> {
5252  foreach vti = AllIntegerVectors in
5253    let Predicates = GetVTypePredicates<vti>.Predicates in
5254    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
5255                             !if(CarryOut, vti.Mask, vti.Vector),
5256                             vti.Vector, XLenVT, vti.Mask,
5257                             vti.Log2SEW, vti.LMul,
5258                             vti.RegClass, simm5>;
5259}
5260
5261multiclass VPatBinaryV_VM_TAIL<string intrinsic, string instruction> {
5262  foreach vti = AllIntegerVectors in
5263    let Predicates = GetVTypePredicates<vti>.Predicates in
5264    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VVM",
5265                                 vti.Vector,
5266                                 vti.Vector, vti.Vector, vti.Mask,
5267                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5268                                 vti.RegClass, vti.RegClass>;
5269}
5270
5271multiclass VPatBinaryV_XM_TAIL<string intrinsic, string instruction> {
5272  foreach vti = AllIntegerVectors in
5273    let Predicates = GetVTypePredicates<vti>.Predicates in
5274    defm : VPatBinaryCarryInTAIL<intrinsic, instruction,
5275                                 "V"#vti.ScalarSuffix#"M",
5276                                 vti.Vector,
5277                                 vti.Vector, vti.Scalar, vti.Mask,
5278                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5279                                 vti.RegClass, vti.ScalarRegClass>;
5280}
5281
5282multiclass VPatBinaryV_IM_TAIL<string intrinsic, string instruction> {
5283  foreach vti = AllIntegerVectors in
5284    let Predicates = GetVTypePredicates<vti>.Predicates in
5285    defm : VPatBinaryCarryInTAIL<intrinsic, instruction, "VIM",
5286                                 vti.Vector,
5287                                 vti.Vector, XLenVT, vti.Mask,
5288                                 vti.Log2SEW, vti.LMul,
5289                                 vti.RegClass, vti.RegClass, simm5>;
5290}
5291
5292multiclass VPatBinaryV_V<string intrinsic, string instruction> {
5293  foreach vti = AllIntegerVectors in
5294    let Predicates = GetVTypePredicates<vti>.Predicates in
5295    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
5296                             vti.Mask, vti.Vector, vti.Vector,
5297                             vti.Log2SEW, vti.LMul,
5298                             vti.RegClass, vti.RegClass>;
5299}
5300
5301multiclass VPatBinaryV_X<string intrinsic, string instruction> {
5302  foreach vti = AllIntegerVectors in
5303    let Predicates = GetVTypePredicates<vti>.Predicates in
5304    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
5305                             vti.Mask, vti.Vector, XLenVT,
5306                             vti.Log2SEW, vti.LMul,
5307                             vti.RegClass, GPR>;
5308}
5309
5310multiclass VPatBinaryV_I<string intrinsic, string instruction> {
5311  foreach vti = AllIntegerVectors in
5312    let Predicates = GetVTypePredicates<vti>.Predicates in
5313    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
5314                             vti.Mask, vti.Vector, XLenVT,
5315                             vti.Log2SEW, vti.LMul,
5316                             vti.RegClass, simm5>;
5317}
5318
5319multiclass VPatBinaryM_VV<string intrinsic, string instruction,
5320                          list<VTypeInfo> vtilist> {
5321  foreach vti = vtilist in
5322    let Predicates = GetVTypePredicates<vti>.Predicates in
5323    defm : VPatBinaryM<intrinsic, instruction # "_VV_" # vti.LMul.MX,
5324                       vti.Mask, vti.Vector, vti.Vector, vti.Mask,
5325                       vti.Log2SEW, VR,
5326                       vti.RegClass, vti.RegClass>;
5327}
5328
5329multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
5330                                 list<VTypeInfo> vtilist> {
5331  foreach vti = vtilist in
5332    let Predicates = GetVTypePredicates<vti>.Predicates in
5333    defm : VPatBinaryMSwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
5334                              vti.Mask, vti.Vector, vti.Vector, vti.Mask,
5335                              vti.Log2SEW, VR,
5336                              vti.RegClass, vti.RegClass>;
5337}
5338
5339multiclass VPatBinaryM_VX<string intrinsic, string instruction,
5340                          list<VTypeInfo> vtilist> {
5341  foreach vti = vtilist in {
5342    defvar kind = "V"#vti.ScalarSuffix;
5343    let Predicates = GetVTypePredicates<vti>.Predicates in
5344    defm : VPatBinaryM<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
5345                       vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
5346                       vti.Log2SEW, VR,
5347                       vti.RegClass, vti.ScalarRegClass>;
5348  }
5349}
5350
5351multiclass VPatBinaryM_VI<string intrinsic, string instruction,
5352                          list<VTypeInfo> vtilist> {
5353  foreach vti = vtilist in
5354    let Predicates = GetVTypePredicates<vti>.Predicates in
5355    defm : VPatBinaryM<intrinsic, instruction # "_VI_" # vti.LMul.MX,
5356                       vti.Mask, vti.Vector, XLenVT, vti.Mask,
5357                       vti.Log2SEW, VR,
5358                       vti.RegClass, simm5>;
5359}
5360
5361multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
5362                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
5363    : VPatBinaryV_VV<intrinsic, instruction, vtilist>,
5364      VPatBinaryV_VX<intrinsic, instruction, vtilist>,
5365      VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
5366
5367multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
5368                                   list<VTypeInfo> vtilist, Operand ImmType>
5369    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
5370      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
5371      VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;
5372
5373multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
5374                             list<VTypeInfo> vtilist, bit isSEWAware = 0>
5375    : VPatBinaryV_VV<intrinsic, instruction, vtilist, isSEWAware>,
5376      VPatBinaryV_VX<intrinsic, instruction, vtilist, isSEWAware>;
5377
5378multiclass VPatBinaryV_VV_VX_RM<string intrinsic, string instruction,
5379                                list<VTypeInfo> vtilist, bit isSEWAware = 0>
5380    : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
5381      VPatBinaryV_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5382
5383multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
5384                             list<VTypeInfo> vtilist>
5385    : VPatBinaryV_VX<intrinsic, instruction, vtilist>,
5386      VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
5387
5388multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
5389                             list<VTypeInfoToWide> vtilist>
5390    : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
5391      VPatBinaryW_VX<intrinsic, instruction, vtilist>;
5392
5393multiclass
5394    VPatBinaryW_VV_VX_RM<string intrinsic, string instruction,
5395                         list<VTypeInfoToWide> vtilist, bit isSEWAware = 0>
5396    : VPatBinaryW_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
5397      VPatBinaryW_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5398
5399multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
5400                             list<VTypeInfoToWide> vtilist>
5401    : VPatBinaryW_WV<intrinsic, instruction, vtilist>,
5402      VPatBinaryW_WX<intrinsic, instruction, vtilist>;
5403
5404multiclass
5405    VPatBinaryW_WV_WX_RM<string intrinsic, string instruction,
5406                         list<VTypeInfoToWide> vtilist, bit isSEWAware = 0>
5407    : VPatBinaryW_WV_RM<intrinsic, instruction, vtilist, isSEWAware>,
5408      VPatBinaryW_WX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5409
5410multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
5411                                list<VTypeInfoToWide> vtilist>
5412    : VPatBinaryV_WV<intrinsic, instruction, vtilist>,
5413      VPatBinaryV_WX<intrinsic, instruction, vtilist>,
5414      VPatBinaryV_WI<intrinsic, instruction, vtilist>;
5415
5416multiclass VPatBinaryV_WV_WX_WI_RM<string intrinsic, string instruction,
5417                                   list<VTypeInfoToWide> vtilist>
5418    : VPatBinaryV_WV_RM<intrinsic, instruction, vtilist>,
5419      VPatBinaryV_WX_RM<intrinsic, instruction, vtilist>,
5420      VPatBinaryV_WI_RM<intrinsic, instruction, vtilist>;
5421
5422multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
5423    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
5424      VPatBinaryV_XM_TAIL<intrinsic, instruction>,
5425      VPatBinaryV_IM_TAIL<intrinsic, instruction>;
5426
5427multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
5428    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
5429      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>,
5430      VPatBinaryV_IM<intrinsic, instruction, CarryOut=1>;
5431
5432multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
5433    : VPatBinaryV_V<intrinsic, instruction>,
5434      VPatBinaryV_X<intrinsic, instruction>,
5435      VPatBinaryV_I<intrinsic, instruction>;
5436
5437multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
5438    : VPatBinaryV_VM_TAIL<intrinsic, instruction>,
5439      VPatBinaryV_XM_TAIL<intrinsic, instruction>;
5440
5441multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
5442    : VPatBinaryV_VM<intrinsic, instruction, CarryOut=1>,
5443      VPatBinaryV_XM<intrinsic, instruction, CarryOut=1>;
5444
5445multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
5446    : VPatBinaryV_V<intrinsic, instruction>,
5447      VPatBinaryV_X<intrinsic, instruction>;
5448
5449multiclass VPatTernaryWithPolicy<string intrinsic,
5450                                 string inst,
5451                                 string kind,
5452                                 ValueType result_type,
5453                                 ValueType op1_type,
5454                                 ValueType op2_type,
5455                                 ValueType mask_type,
5456                                 int sew,
5457                                 LMULInfo vlmul,
5458                                 VReg result_reg_class,
5459                                 RegisterClass op1_reg_class,
5460                                 DAGOperand op2_kind> {
5461  def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
5462                                    op2_type, sew, vlmul, result_reg_class,
5463                                    op1_reg_class, op2_kind>;
5464  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
5465                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
5466                              op2_kind>;
5467}
5468
5469multiclass VPatTernaryWithPolicyRoundingMode<string intrinsic,
5470                                             string inst,
5471                                             string kind,
5472                                             ValueType result_type,
5473                                             ValueType op1_type,
5474                                             ValueType op2_type,
5475                                             ValueType mask_type,
5476                                             int sew,
5477                                             LMULInfo vlmul,
5478                                             VReg result_reg_class,
5479                                             RegisterClass op1_reg_class,
5480                                             DAGOperand op2_kind,
5481                                             bit isSEWAware = 0> {
5482  def : VPatTernaryNoMaskWithPolicyRoundingMode<intrinsic, inst, kind, result_type,
5483                                                op1_type, op2_type, sew, vlmul,
5484                                                result_reg_class, op1_reg_class,
5485                                                op2_kind, isSEWAware>;
5486  def : VPatTernaryMaskPolicyRoundingMode<intrinsic, inst, kind, result_type, op1_type,
5487                                                op2_type, mask_type, sew, vlmul,
5488                                                result_reg_class, op1_reg_class,
5489                                                op2_kind, isSEWAware>;
5490}
5491
5492multiclass VPatTernaryTU<string intrinsic,
5493                         string inst,
5494                         string kind,
5495                         ValueType result_type,
5496                         ValueType op1_type,
5497                         ValueType op2_type,
5498                         ValueType mask_type,
5499                         int log2sew,
5500                         LMULInfo vlmul,
5501                         VReg result_reg_class,
5502                         RegisterClass op1_reg_class,
5503                         DAGOperand op2_kind> {
5504  def : VPatTernaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
5505                            op2_type, log2sew, vlmul, result_reg_class,
5506                            op1_reg_class, op2_kind>;
5507  def : VPatTernaryMaskTU<intrinsic, inst, kind, result_type, op1_type,
5508                          op2_type, mask_type, log2sew, vlmul,
5509                          result_reg_class, op1_reg_class, op2_kind>;
5510}
5511
5512multiclass VPatTernaryTURoundingMode<string intrinsic,
5513                                     string inst,
5514                                     string kind,
5515                                     ValueType result_type,
5516                                     ValueType op1_type,
5517                                     ValueType op2_type,
5518                                     ValueType mask_type,
5519                                     int log2sew,
5520                                     LMULInfo vlmul,
5521                                     VReg result_reg_class,
5522                                     RegisterClass op1_reg_class,
5523                                     DAGOperand op2_kind> {
5524  def : VPatTernaryNoMaskTURoundingMode<intrinsic, inst, kind, result_type, op1_type,
5525                            op2_type, log2sew, vlmul, result_reg_class,
5526                            op1_reg_class, op2_kind>;
5527  def : VPatTernaryMaskTURoundingMode<intrinsic, inst, kind, result_type, op1_type,
5528                          op2_type, mask_type, log2sew, vlmul,
5529                          result_reg_class, op1_reg_class, op2_kind>;
5530}
5531
5532multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
5533                                list<VTypeInfo> vtilist> {
5534  foreach vti = vtilist in
5535    let Predicates = GetVTypePredicates<vti>.Predicates in
5536    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
5537                                 vti.Vector, vti.Vector, vti.Vector, vti.Mask,
5538                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5539                                 vti.RegClass, vti.RegClass>;
5540}
5541
5542multiclass VPatTernaryV_VV_AAXA_RM<string intrinsic, string instruction,
5543                                list<VTypeInfo> vtilist, bit isSEWAware = 0> {
5544  foreach vti = vtilist in
5545    let Predicates = GetVTypePredicates<vti>.Predicates in
5546    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
5547                                             vti.Vector, vti.Vector, vti.Vector, vti.Mask,
5548                                             vti.Log2SEW, vti.LMul, vti.RegClass,
5549                                             vti.RegClass, vti.RegClass, isSEWAware>;
5550}
5551
5552multiclass VPatTernaryV_VX<string intrinsic, string instruction,
5553                           list<VTypeInfo> vtilist> {
5554  foreach vti = vtilist in
5555    let Predicates = GetVTypePredicates<vti>.Predicates in
5556    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
5557                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
5558                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5559                                 vti.RegClass, GPR>;
5560}
5561
5562multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
5563                           list<VTypeInfo> vtilist> {
5564  foreach vti = vtilist in
5565    let Predicates = GetVTypePredicates<vti>.Predicates in
5566    defm : VPatTernaryWithPolicy<intrinsic, instruction,
5567                                 "V"#vti.ScalarSuffix,
5568                                 vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
5569                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5570                                 vti.ScalarRegClass, vti.RegClass>;
5571}
5572
5573multiclass VPatTernaryV_VX_AAXA_RM<string intrinsic, string instruction,
5574                           list<VTypeInfo> vtilist, bit isSEWAware = 0> {
5575  foreach vti = vtilist in
5576    let Predicates = GetVTypePredicates<vti>.Predicates in
5577    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction,
5578                                             "V"#vti.ScalarSuffix,
5579                                             vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
5580                                             vti.Log2SEW, vti.LMul, vti.RegClass,
5581                                             vti.ScalarRegClass, vti.RegClass, isSEWAware>;
5582}
5583
5584multiclass VPatTernaryV_VI<string intrinsic, string instruction,
5585                           list<VTypeInfo> vtilist, Operand Imm_type> {
5586  foreach vti = vtilist in
5587    let Predicates = GetVTypePredicates<vti>.Predicates in
5588    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
5589                                 vti.Vector, vti.Vector, XLenVT, vti.Mask,
5590                                 vti.Log2SEW, vti.LMul, vti.RegClass,
5591                                 vti.RegClass, Imm_type>;
5592}
5593
5594multiclass VPatTernaryW_VV<string intrinsic, string instruction,
5595                           list<VTypeInfoToWide> vtilist> {
5596  foreach vtiToWti = vtilist in {
5597    defvar vti = vtiToWti.Vti;
5598    defvar wti = vtiToWti.Wti;
5599    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5600                                 GetVTypePredicates<wti>.Predicates) in
5601    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
5602                                 wti.Vector, vti.Vector, vti.Vector,
5603                                 vti.Mask, vti.Log2SEW, vti.LMul,
5604                                 wti.RegClass, vti.RegClass, vti.RegClass>;
5605  }
5606}
5607
5608multiclass VPatTernaryW_VV_RM<string intrinsic, string instruction,
5609                           list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
5610  foreach vtiToWti = vtilist in {
5611    defvar vti = vtiToWti.Vti;
5612    defvar wti = vtiToWti.Wti;
5613    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5614                                 GetVTypePredicates<wti>.Predicates) in
5615    defm : VPatTernaryWithPolicyRoundingMode<intrinsic, instruction, "VV",
5616                                             wti.Vector, vti.Vector, vti.Vector,
5617                                             vti.Mask, vti.Log2SEW, vti.LMul,
5618                                             wti.RegClass, vti.RegClass,
5619                                             vti.RegClass, isSEWAware>;
5620  }
5621}
5622
5623multiclass VPatTernaryW_VX<string intrinsic, string instruction,
5624                           list<VTypeInfoToWide> vtilist> {
5625  foreach vtiToWti = vtilist in {
5626    defvar vti = vtiToWti.Vti;
5627    defvar wti = vtiToWti.Wti;
5628    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5629                                 GetVTypePredicates<wti>.Predicates) in
5630    defm : VPatTernaryWithPolicy<intrinsic, instruction,
5631                                 "V"#vti.ScalarSuffix,
5632                                 wti.Vector, vti.Scalar, vti.Vector,
5633                                 vti.Mask, vti.Log2SEW, vti.LMul,
5634                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
5635  }
5636}
5637
5638multiclass
5639    VPatTernaryW_VX_RM<string intrinsic, string instruction,
5640                       list<VTypeInfoToWide> vtilist, bit isSEWAware = 0> {
5641  foreach vtiToWti = vtilist in {
5642    defvar vti = vtiToWti.Vti;
5643    defvar wti = vtiToWti.Wti;
5644    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5645                                 GetVTypePredicates<wti>.Predicates) in defm
5646        : VPatTernaryWithPolicyRoundingMode<
5647              intrinsic, instruction, "V" #vti.ScalarSuffix, wti.Vector,
5648              vti.Scalar, vti.Vector, vti.Mask, vti.Log2SEW, vti.LMul,
5649              wti.RegClass, vti.ScalarRegClass, vti.RegClass, isSEWAware>;
5650  }
5651}
5652
5653multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
5654                              list<VTypeInfo> vtilist>
5655    : VPatTernaryV_VV_AAXA<intrinsic, instruction, vtilist>,
5656      VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
5657
5658multiclass VPatTernaryV_VV_VX_AAXA_RM<string intrinsic, string instruction,
5659                              list<VTypeInfo> vtilist, bit isSEWAware = 0>
5660    : VPatTernaryV_VV_AAXA_RM<intrinsic, instruction, vtilist, isSEWAware>,
5661      VPatTernaryV_VX_AAXA_RM<intrinsic, instruction, vtilist, isSEWAware>;
5662
5663multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
5664                              list<VTypeInfo> vtilist, Operand Imm_type>
5665    : VPatTernaryV_VX<intrinsic, instruction, vtilist>,
5666      VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
5667
5668
5669multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
5670                                list<VTypeInfo> vtilist>
5671    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
5672      VPatBinaryM_VX<intrinsic, instruction, vtilist>,
5673      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5674
5675multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
5676                              list<VTypeInfoToWide> vtilist>
5677    : VPatTernaryW_VV<intrinsic, instruction, vtilist>,
5678      VPatTernaryW_VX<intrinsic, instruction, vtilist>;
5679
5680multiclass VPatTernaryW_VV_VX_RM<string intrinsic, string instruction,
5681                              list<VTypeInfoToWide> vtilist, bit isSEWAware = 1>
5682    : VPatTernaryW_VV_RM<intrinsic, instruction, vtilist, isSEWAware>,
5683      VPatTernaryW_VX_RM<intrinsic, instruction, vtilist, isSEWAware>;
5684
5685multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
5686                             list<VTypeInfo> vtilist>
5687    : VPatBinaryM_VV<intrinsic, instruction, vtilist>,
5688      VPatBinaryM_VX<intrinsic, instruction, vtilist>;
5689
5690multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
5691                             list<VTypeInfo> vtilist>
5692    : VPatBinaryM_VX<intrinsic, instruction, vtilist>,
5693      VPatBinaryM_VI<intrinsic, instruction, vtilist>;
5694
5695multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
5696                                    list<VTypeInfo> vtilist, Operand ImmType>
5697    : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
5698      VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
5699      VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
5700
5701multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
5702  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
5703    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
5704    let Predicates = GetVTypePredicates<vti>.Predicates in
5705    defm : VPatTernaryTU<intrinsic, instruction, "VS",
5706                         vectorM1.Vector, vti.Vector,
5707                         vectorM1.Vector, vti.Mask,
5708                         vti.Log2SEW, vti.LMul,
5709                         VR, vti.RegClass, VR>;
5710  }
5711  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
5712    let Predicates = GetVTypePredicates<gvti>.Predicates in
5713    defm : VPatTernaryTU<intrinsic, instruction, "VS",
5714                         gvti.VectorM1, gvti.Vector,
5715                         gvti.VectorM1, gvti.Mask,
5716                         gvti.Log2SEW, gvti.LMul,
5717                         VR, gvti.RegClass, VR>;
5718  }
5719}
5720
5721multiclass VPatReductionV_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
5722  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in {
5723    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
5724    let Predicates = GetVTypePredicates<vti>.Predicates in
5725    defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
5726                                     vectorM1.Vector, vti.Vector,
5727                                     vectorM1.Vector, vti.Mask,
5728                                     vti.Log2SEW, vti.LMul,
5729                                     VR, vti.RegClass, VR>;
5730  }
5731  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in {
5732    let Predicates = GetVTypePredicates<gvti>.Predicates in
5733    defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
5734                                     gvti.VectorM1, gvti.Vector,
5735                                     gvti.VectorM1, gvti.Mask,
5736                                     gvti.Log2SEW, gvti.LMul,
5737                                     VR, gvti.RegClass, VR>;
5738  }
5739}
5740
5741multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
5742  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
5743    defvar wtiSEW = !mul(vti.SEW, 2);
5744    if !le(wtiSEW, 64) then {
5745      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
5746      let Predicates = GetVTypePredicates<vti>.Predicates in
5747      defm : VPatTernaryTU<intrinsic, instruction, "VS",
5748                           wtiM1.Vector, vti.Vector,
5749                           wtiM1.Vector, vti.Mask,
5750                           vti.Log2SEW, vti.LMul,
5751                           wtiM1.RegClass, vti.RegClass,
5752                           wtiM1.RegClass>;
5753    }
5754  }
5755}
5756
5757multiclass VPatReductionW_VS_RM<string intrinsic, string instruction, bit IsFloat = 0> {
5758  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in {
5759    defvar wtiSEW = !mul(vti.SEW, 2);
5760    if !le(wtiSEW, 64) then {
5761      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
5762      let Predicates = GetVTypePredicates<vti>.Predicates in
5763      defm : VPatTernaryTURoundingMode<intrinsic, instruction, "VS",
5764                                       wtiM1.Vector, vti.Vector,
5765                                       wtiM1.Vector, vti.Mask,
5766                                       vti.Log2SEW, vti.LMul,
5767                                       wtiM1.RegClass, vti.RegClass,
5768                                       wtiM1.RegClass>;
5769    }
5770  }
5771}
5772
5773multiclass VPatConversionVI_VF<string intrinsic,
5774                               string instruction> {
5775  foreach fvti = AllFloatVectors in {
5776    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5777    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5778                                 GetVTypePredicates<ivti>.Predicates) in
5779    defm : VPatConversion<intrinsic, instruction, "V",
5780                          ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
5781                          fvti.LMul, ivti.RegClass, fvti.RegClass>;
5782  }
5783}
5784
5785multiclass VPatConversionVI_VF_RM<string intrinsic,
5786                                  string instruction> {
5787  foreach fvti = AllFloatVectors in {
5788    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5789    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5790                                 GetVTypePredicates<ivti>.Predicates) in
5791    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
5792                                      ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
5793                                      fvti.LMul, ivti.RegClass, fvti.RegClass>;
5794  }
5795}
5796
5797multiclass VPatConversionVI_VF_RTZ<string intrinsic,
5798                                           string instruction> {
5799  foreach fvti = AllFloatVectors in {
5800    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5801    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5802                                 GetVTypePredicates<ivti>.Predicates) in
5803    defm : VPatConversionRTZ<intrinsic, instruction, "V",
5804                                              ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
5805                                              fvti.LMul, ivti.RegClass, fvti.RegClass>;
5806  }
5807}
5808
5809multiclass VPatConversionVF_VI_RM<string intrinsic, string instruction,
5810                                  bit isSEWAware = 0> {
5811  foreach fvti = AllFloatVectors in {
5812    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
5813    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5814                                 GetVTypePredicates<ivti>.Predicates) in
5815    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
5816                                      fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
5817                                      ivti.LMul, fvti.RegClass, ivti.RegClass,
5818                                      isSEWAware>;
5819  }
5820}
5821
5822multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
5823  foreach fvtiToFWti = AllWidenableFloatVectors in {
5824    defvar fvti = fvtiToFWti.Vti;
5825    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5826    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5827                                 GetVTypePredicates<iwti>.Predicates) in
5828    defm : VPatConversion<intrinsic, instruction, "V",
5829                          iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
5830                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
5831  }
5832}
5833
5834multiclass VPatConversionWI_VF_RM<string intrinsic, string instruction> {
5835  foreach fvtiToFWti = AllWidenableFloatVectors in {
5836    defvar fvti = fvtiToFWti.Vti;
5837    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5838    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5839                                 GetVTypePredicates<iwti>.Predicates) in
5840    defm : VPatConversionRoundingMode<intrinsic, instruction, "V",
5841                                      iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
5842                                      fvti.LMul, iwti.RegClass, fvti.RegClass>;
5843  }
5844}
5845
5846multiclass VPatConversionWI_VF_RTZ<string intrinsic, string instruction> {
5847  foreach fvtiToFWti = AllWidenableFloatVectors in {
5848    defvar fvti = fvtiToFWti.Vti;
5849    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5850    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5851                                 GetVTypePredicates<iwti>.Predicates) in
5852    defm : VPatConversionRTZ<intrinsic, instruction, "V",
5853                             iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
5854                             fvti.LMul, iwti.RegClass, fvti.RegClass>;
5855  }
5856}
5857
5858multiclass VPatConversionWF_VI<string intrinsic, string instruction,
5859                               bit isSEWAware = 0> {
5860  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5861    defvar vti = vtiToWti.Vti;
5862    defvar fwti = vtiToWti.Wti;
5863    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5864                                 GetVTypePredicates<fwti>.Predicates) in
5865    defm : VPatConversion<intrinsic, instruction, "V",
5866                          fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
5867                          vti.LMul, fwti.RegClass, vti.RegClass, isSEWAware>;
5868  }
5869}
5870
5871multiclass VPatConversionWF_VF<string intrinsic, string instruction,
5872                               bit isSEWAware = 0> {
5873  foreach fvtiToFWti = AllWidenableFloatVectors in {
5874    defvar fvti = fvtiToFWti.Vti;
5875    defvar fwti = fvtiToFWti.Wti;
5876    // Define vfwcvt.f.f.v for f16 when Zvfhmin is enable.
5877    let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
5878                         !listconcat(GetVTypePredicates<fvti>.Predicates,
5879                                     GetVTypePredicates<fwti>.Predicates)) in
5880      defm : VPatConversion<intrinsic, instruction, "V",
5881                            fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
5882                            fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
5883  }
5884}
5885
5886multiclass VPatConversionWF_VF_BF<string intrinsic, string instruction,
5887                                  bit isSEWAware = 0> {
5888  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in
5889  {
5890    defvar fvti = fvtiToFWti.Vti;
5891    defvar fwti = fvtiToFWti.Wti;
5892    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5893                                 GetVTypePredicates<fwti>.Predicates) in
5894    defm : VPatConversion<intrinsic, instruction, "V",
5895                          fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
5896                          fvti.LMul, fwti.RegClass, fvti.RegClass, isSEWAware>;
5897  }
5898}
5899
5900multiclass VPatConversionVI_WF<string intrinsic, string instruction> {
5901  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5902    defvar vti = vtiToWti.Vti;
5903    defvar fwti = vtiToWti.Wti;
5904    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5905                                 GetVTypePredicates<fwti>.Predicates) in
5906    defm : VPatConversion<intrinsic, instruction, "W",
5907                          vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
5908                          vti.LMul, vti.RegClass, fwti.RegClass>;
5909  }
5910}
5911
5912multiclass VPatConversionVI_WF_RM <string intrinsic, string instruction> {
5913  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5914    defvar vti = vtiToWti.Vti;
5915    defvar fwti = vtiToWti.Wti;
5916    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5917                                 GetVTypePredicates<fwti>.Predicates) in
5918    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
5919                                      vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
5920                                      vti.LMul, vti.RegClass, fwti.RegClass>;
5921  }
5922}
5923
5924multiclass VPatConversionVI_WF_RTZ <string intrinsic, string instruction> {
5925  foreach vtiToWti = AllWidenableIntToFloatVectors in {
5926    defvar vti = vtiToWti.Vti;
5927    defvar fwti = vtiToWti.Wti;
5928    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
5929                                 GetVTypePredicates<fwti>.Predicates) in
5930    defm : VPatConversionRTZ<intrinsic, instruction, "W",
5931                             vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
5932                             vti.LMul, vti.RegClass, fwti.RegClass>;
5933  }
5934}
5935
5936multiclass VPatConversionVF_WI_RM <string intrinsic, string instruction,
5937                                   bit isSEWAware = 0> {
5938  foreach fvtiToFWti = AllWidenableFloatVectors in {
5939    defvar fvti = fvtiToFWti.Vti;
5940    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
5941    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5942                                 GetVTypePredicates<iwti>.Predicates) in
5943    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
5944                                      fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
5945                                      fvti.LMul, fvti.RegClass, iwti.RegClass,
5946                                      isSEWAware>;
5947  }
5948}
5949
5950multiclass VPatConversionVF_WF<string intrinsic, string instruction,
5951                               bit isSEWAware = 0> {
5952  foreach fvtiToFWti = AllWidenableFloatVectors in {
5953    defvar fvti = fvtiToFWti.Vti;
5954    defvar fwti = fvtiToFWti.Wti;
5955    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5956                                 GetVTypePredicates<fwti>.Predicates) in
5957    defm : VPatConversion<intrinsic, instruction, "W",
5958                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
5959                          fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
5960  }
5961}
5962
5963multiclass VPatConversionVF_WF_RM<string intrinsic, string instruction,
5964                                   list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
5965                                   bit isSEWAware = 0> {
5966  foreach fvtiToFWti = wlist in {
5967    defvar fvti = fvtiToFWti.Vti;
5968    defvar fwti = fvtiToFWti.Wti;
5969    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5970                                 GetVTypePredicates<fwti>.Predicates) in
5971    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
5972                                      fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
5973                                      fvti.LMul, fvti.RegClass, fwti.RegClass,
5974                                      isSEWAware>;
5975  }
5976}
5977
5978multiclass VPatConversionVF_WF_RTZ<string intrinsic, string instruction,
5979                                   list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
5980                                   bit isSEWAware = 0> {
5981  foreach fvtiToFWti = wlist in {
5982    defvar fvti = fvtiToFWti.Vti;
5983    defvar fwti = fvtiToFWti.Wti;
5984    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5985                                 GetVTypePredicates<fwti>.Predicates) in
5986    defm : VPatConversionRTZ<intrinsic, instruction, "W",
5987                             fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
5988                             fvti.LMul, fvti.RegClass, fwti.RegClass, isSEWAware>;
5989  }
5990}
5991
5992multiclass VPatConversionVF_WF_BF_RM<string intrinsic, string instruction,
5993                                     bit isSEWAware = 0> {
5994  foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
5995    defvar fvti = fvtiToFWti.Vti;
5996    defvar fwti = fvtiToFWti.Wti;
5997    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
5998                                 GetVTypePredicates<fwti>.Predicates) in
5999    defm : VPatConversionRoundingMode<intrinsic, instruction, "W",
6000                                      fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
6001                                      fvti.LMul, fvti.RegClass, fwti.RegClass,
6002                                      isSEWAware>;
6003  }
6004}
6005
6006multiclass VPatCompare_VI<string intrinsic, string inst,
6007                          ImmLeaf ImmType> {
6008  foreach vti = AllIntegerVectors in {
6009    defvar Intr = !cast<Intrinsic>(intrinsic);
6010    defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
6011    let Predicates = GetVTypePredicates<vti>.Predicates in
6012    def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
6013                              (vti.Scalar ImmType:$rs2),
6014                              VLOpFrag)),
6015              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
6016                      GPR:$vl, vti.Log2SEW)>;
6017    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
6018    defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
6019    let Predicates = GetVTypePredicates<vti>.Predicates in
6020    def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$passthru),
6021                                  (vti.Vector vti.RegClass:$rs1),
6022                                  (vti.Scalar ImmType:$rs2),
6023                                  (vti.Mask V0),
6024                                  VLOpFrag)),
6025              (PseudoMask VR:$passthru, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
6026                          (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
6027  }
6028}
6029
6030//===----------------------------------------------------------------------===//
6031// Pseudo instructions
6032//===----------------------------------------------------------------------===//
6033
6034let Predicates = [HasVInstructions] in {
6035
6036//===----------------------------------------------------------------------===//
6037// Pseudo Instructions for CodeGen
6038//===----------------------------------------------------------------------===//
6039
6040let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
6041  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
6042                               [(set GPR:$rd, (riscv_read_vlenb))]>,
6043                        PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>,
6044                        Sched<[WriteRdVLENB]>;
6045}
6046
6047let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
6048    Uses = [VL] in
6049def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>,
6050                   PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVL.Encoding, X0)>;
6051
6052foreach lmul = MxList in {
6053  foreach nf = NFSet<lmul>.L in {
6054    defvar vreg = SegRegClass<lmul, nf>.RC;
6055    let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
6056        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
6057      def "PseudoVSPILL" # nf # "_" # lmul.MX :
6058        Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
6059    }
6060    let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
6061        Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
6062      def "PseudoVRELOAD" # nf # "_" # lmul.MX :
6063        Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
6064    }
6065  }
6066}
6067
6068//===----------------------------------------------------------------------===//
6069// 6. Configuration-Setting Instructions
6070//===----------------------------------------------------------------------===//
6071
6072// Pseudos.
6073let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
6074// Due to rs1=X0 having special meaning, we need a GPRNoX0 register class for
6075// the when we aren't using one of the special X0 encodings. Otherwise it could
6076// be accidentally be made X0 by MachineIR optimizations. To satisfy the
6077// verifier, we also need a GPRX0 instruction for the special encodings.
6078def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPRNoX0:$rs1, VTypeIOp11:$vtypei), []>,
6079                    PseudoInstExpansion<(VSETVLI GPR:$rd, GPR:$rs1, VTypeIOp11:$vtypei)>,
6080                    Sched<[WriteVSETVLI, ReadVSETVLI]>;
6081def PseudoVSETVLIX0 : Pseudo<(outs GPR:$rd), (ins GPRX0:$rs1, VTypeIOp11:$vtypei), []>,
6082                      PseudoInstExpansion<(VSETVLI GPR:$rd, GPR:$rs1, VTypeIOp11:$vtypei)>,
6083                      Sched<[WriteVSETVLI, ReadVSETVLI]>;
6084def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei), []>,
6085                     PseudoInstExpansion<(VSETIVLI GPR:$rd, uimm5:$rs1, VTypeIOp10:$vtypei)>,
6086                     Sched<[WriteVSETIVLI]>;
6087}
6088
6089//===----------------------------------------------------------------------===//
6090// 7. Vector Loads and Stores
6091//===----------------------------------------------------------------------===//
6092
6093//===----------------------------------------------------------------------===//
6094// 7.4 Vector Unit-Stride Instructions
6095//===----------------------------------------------------------------------===//
6096
6097// Pseudos Unit-Stride Loads and Stores
6098defm PseudoVL : VPseudoUSLoad;
6099defm PseudoVS : VPseudoUSStore;
6100
6101defm PseudoVLM : VPseudoLoadMask;
6102defm PseudoVSM : VPseudoStoreMask;
6103
6104//===----------------------------------------------------------------------===//
6105// 7.5 Vector Strided Instructions
6106//===----------------------------------------------------------------------===//
6107
6108// Vector Strided Loads and Stores
6109defm PseudoVLS : VPseudoSLoad;
6110defm PseudoVSS : VPseudoSStore;
6111
6112//===----------------------------------------------------------------------===//
6113// 7.6 Vector Indexed Instructions
6114//===----------------------------------------------------------------------===//
6115
6116// Vector Indexed Loads and Stores
6117defm PseudoVLUX : VPseudoILoad<Ordered=false>;
6118defm PseudoVLOX : VPseudoILoad<Ordered=true>;
6119defm PseudoVSOX : VPseudoIStore<Ordered=true>;
6120defm PseudoVSUX : VPseudoIStore<Ordered=false>;
6121
6122//===----------------------------------------------------------------------===//
6123// 7.7. Unit-stride Fault-Only-First Loads
6124//===----------------------------------------------------------------------===//
6125
6126// vleff may update VL register
6127let Defs = [VL] in
6128defm PseudoVL : VPseudoFFLoad;
6129
6130//===----------------------------------------------------------------------===//
6131// 7.8. Vector Load/Store Segment Instructions
6132//===----------------------------------------------------------------------===//
6133defm PseudoVLSEG : VPseudoUSSegLoad;
6134defm PseudoVLSSEG : VPseudoSSegLoad;
6135defm PseudoVLOXSEG : VPseudoISegLoad<Ordered=true>;
6136defm PseudoVLUXSEG : VPseudoISegLoad<Ordered=false>;
6137defm PseudoVSSEG : VPseudoUSSegStore;
6138defm PseudoVSSSEG : VPseudoSSegStore;
6139defm PseudoVSOXSEG : VPseudoISegStore<Ordered=true>;
6140defm PseudoVSUXSEG : VPseudoISegStore<Ordered=false>;
6141
6142// vlseg<nf>e<eew>ff.v may update VL register
6143let Defs = [VL] in {
6144defm PseudoVLSEG : VPseudoUSSegLoadFF;
6145}
6146
6147//===----------------------------------------------------------------------===//
6148// 11. Vector Integer Arithmetic Instructions
6149//===----------------------------------------------------------------------===//
6150
6151//===----------------------------------------------------------------------===//
6152// 11.1. Vector Single-Width Integer Add and Subtract
6153//===----------------------------------------------------------------------===//
6154defm PseudoVADD   : VPseudoVALU_VV_VX_VI<Commutable=1>;
6155defm PseudoVSUB   : VPseudoVALU_VV_VX;
6156defm PseudoVRSUB  : VPseudoVALU_VX_VI;
6157
6158foreach vti = AllIntegerVectors in {
6159  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
6160  // Occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
6161  // to use a more complex splat sequence. Add the pattern for all VTs for
6162  // consistency.
6163  let Predicates = GetVTypePredicates<vti>.Predicates in {
6164    def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$passthru),
6165                                           (vti.Vector vti.RegClass:$rs2),
6166                                           (vti.Vector vti.RegClass:$rs1),
6167                                           VLOpFrag)),
6168              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX)
6169                                                        vti.RegClass:$passthru,
6170                                                        vti.RegClass:$rs1,
6171                                                        vti.RegClass:$rs2,
6172                                                        GPR:$vl,
6173                                                        vti.Log2SEW, TU_MU)>;
6174    def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$passthru),
6175                                                (vti.Vector vti.RegClass:$rs2),
6176                                                (vti.Vector vti.RegClass:$rs1),
6177                                                (vti.Mask V0),
6178                                                VLOpFrag,
6179                                                (XLenVT timm:$policy))),
6180              (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
6181                                                        vti.RegClass:$passthru,
6182                                                        vti.RegClass:$rs1,
6183                                                        vti.RegClass:$rs2,
6184                                                        (vti.Mask V0),
6185                                                        GPR:$vl,
6186                                                        vti.Log2SEW,
6187                                                        (XLenVT timm:$policy))>;
6188
6189    // Match VSUB with a small immediate to vadd.vi by negating the immediate.
6190    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$passthru),
6191                                          (vti.Vector vti.RegClass:$rs1),
6192                                          (vti.Scalar simm5_plus1:$rs2),
6193                                          VLOpFrag)),
6194              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX)
6195                                                      vti.RegClass:$passthru,
6196                                                      vti.RegClass:$rs1,
6197                                                      (NegImm simm5_plus1:$rs2),
6198                                                      GPR:$vl,
6199                                                      vti.Log2SEW, TU_MU)>;
6200    def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$passthru),
6201                                               (vti.Vector vti.RegClass:$rs1),
6202                                               (vti.Scalar simm5_plus1:$rs2),
6203                                               (vti.Mask V0),
6204                                               VLOpFrag,
6205                                               (XLenVT timm:$policy))),
6206              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
6207                                                        vti.RegClass:$passthru,
6208                                                        vti.RegClass:$rs1,
6209                                                        (NegImm simm5_plus1:$rs2),
6210                                                        (vti.Mask V0),
6211                                                        GPR:$vl,
6212                                                        vti.Log2SEW,
6213                                                        (XLenVT timm:$policy))>;
6214  }
6215}
6216
6217//===----------------------------------------------------------------------===//
6218// 11.2. Vector Widening Integer Add/Subtract
6219//===----------------------------------------------------------------------===//
6220defm PseudoVWADDU : VPseudoVWALU_VV_VX<Commutable=1>;
6221defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
6222defm PseudoVWADD  : VPseudoVWALU_VV_VX<Commutable=1>;
6223defm PseudoVWSUB  : VPseudoVWALU_VV_VX;
6224defm PseudoVWADDU : VPseudoVWALU_WV_WX;
6225defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
6226defm PseudoVWADD  : VPseudoVWALU_WV_WX;
6227defm PseudoVWSUB  : VPseudoVWALU_WV_WX;
6228
6229//===----------------------------------------------------------------------===//
6230// 11.3. Vector Integer Extension
6231//===----------------------------------------------------------------------===//
6232defm PseudoVZEXT_VF2 : PseudoVEXT_VF2;
6233defm PseudoVZEXT_VF4 : PseudoVEXT_VF4;
6234defm PseudoVZEXT_VF8 : PseudoVEXT_VF8;
6235defm PseudoVSEXT_VF2 : PseudoVEXT_VF2;
6236defm PseudoVSEXT_VF4 : PseudoVEXT_VF4;
6237defm PseudoVSEXT_VF8 : PseudoVEXT_VF8;
6238
6239//===----------------------------------------------------------------------===//
6240// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6241//===----------------------------------------------------------------------===//
6242defm PseudoVADC  : VPseudoVCALU_VM_XM_IM;
6243defm PseudoVMADC : VPseudoVCALUM_VM_XM_IM;
6244defm PseudoVMADC : VPseudoVCALUM_V_X_I;
6245
6246defm PseudoVSBC  : VPseudoVCALU_VM_XM;
6247defm PseudoVMSBC : VPseudoVCALUM_VM_XM;
6248defm PseudoVMSBC : VPseudoVCALUM_V_X;
6249
6250//===----------------------------------------------------------------------===//
6251// 11.5. Vector Bitwise Logical Instructions
6252//===----------------------------------------------------------------------===//
6253defm PseudoVAND : VPseudoVALU_VV_VX_VI<Commutable=1>;
6254defm PseudoVOR  : VPseudoVALU_VV_VX_VI<Commutable=1>;
6255defm PseudoVXOR : VPseudoVALU_VV_VX_VI<Commutable=1>;
6256
6257//===----------------------------------------------------------------------===//
6258// 11.6. Vector Single-Width Bit Shift Instructions
6259//===----------------------------------------------------------------------===//
6260defm PseudoVSLL : VPseudoVSHT_VV_VX_VI;
6261defm PseudoVSRL : VPseudoVSHT_VV_VX_VI;
6262defm PseudoVSRA : VPseudoVSHT_VV_VX_VI;
6263
6264//===----------------------------------------------------------------------===//
6265// 11.7. Vector Narrowing Integer Right Shift Instructions
6266//===----------------------------------------------------------------------===//
6267defm PseudoVNSRL : VPseudoVNSHT_WV_WX_WI;
6268defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
6269
6270//===----------------------------------------------------------------------===//
6271// 11.8. Vector Integer Comparison Instructions
6272//===----------------------------------------------------------------------===//
6273defm PseudoVMSEQ  : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
6274defm PseudoVMSNE  : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
6275defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
6276defm PseudoVMSLT  : VPseudoVCMPM_VV_VX;
6277defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
6278defm PseudoVMSLE  : VPseudoVCMPM_VV_VX_VI;
6279defm PseudoVMSGTU : VPseudoVCMPM_VX_VI;
6280defm PseudoVMSGT  : VPseudoVCMPM_VX_VI;
6281
6282//===----------------------------------------------------------------------===//
6283// 11.9. Vector Integer Min/Max Instructions
6284//===----------------------------------------------------------------------===//
6285defm PseudoVMINU : VPseudoVMINMAX_VV_VX;
6286defm PseudoVMIN  : VPseudoVMINMAX_VV_VX;
6287defm PseudoVMAXU : VPseudoVMINMAX_VV_VX;
6288defm PseudoVMAX  : VPseudoVMINMAX_VV_VX;
6289
6290//===----------------------------------------------------------------------===//
6291// 11.10. Vector Single-Width Integer Multiply Instructions
6292//===----------------------------------------------------------------------===//
6293defm PseudoVMUL    : VPseudoVMUL_VV_VX<Commutable=1>;
6294defm PseudoVMULH   : VPseudoVMUL_VV_VX<Commutable=1>;
6295defm PseudoVMULHU  : VPseudoVMUL_VV_VX<Commutable=1>;
6296defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
6297
6298//===----------------------------------------------------------------------===//
6299// 11.11. Vector Integer Divide Instructions
6300//===----------------------------------------------------------------------===//
6301defm PseudoVDIVU : VPseudoVDIV_VV_VX;
6302defm PseudoVDIV  : VPseudoVDIV_VV_VX;
6303defm PseudoVREMU : VPseudoVDIV_VV_VX;
6304defm PseudoVREM  : VPseudoVDIV_VV_VX;
6305
6306//===----------------------------------------------------------------------===//
6307// 11.12. Vector Widening Integer Multiply Instructions
6308//===----------------------------------------------------------------------===//
6309defm PseudoVWMUL   : VPseudoVWMUL_VV_VX<Commutable=1>;
6310defm PseudoVWMULU  : VPseudoVWMUL_VV_VX<Commutable=1>;
6311defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
6312
6313//===----------------------------------------------------------------------===//
6314// 11.13. Vector Single-Width Integer Multiply-Add Instructions
6315//===----------------------------------------------------------------------===//
6316defm PseudoVMACC  : VPseudoVMAC_VV_VX_AAXA;
6317defm PseudoVNMSAC : VPseudoVMAC_VV_VX_AAXA;
6318defm PseudoVMADD  : VPseudoVMAC_VV_VX_AAXA;
6319defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
6320
6321//===----------------------------------------------------------------------===//
6322// 11.14. Vector Widening Integer Multiply-Add Instructions
6323//===----------------------------------------------------------------------===//
6324defm PseudoVWMACCU  : VPseudoVWMAC_VV_VX<Commutable=1>;
6325defm PseudoVWMACC   : VPseudoVWMAC_VV_VX<Commutable=1>;
6326defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
6327defm PseudoVWMACCUS : VPseudoVWMAC_VX;
6328
6329//===----------------------------------------------------------------------===//
6330// 11.15. Vector Integer Merge Instructions
6331//===----------------------------------------------------------------------===//
6332defm PseudoVMERGE : VPseudoVMRG_VM_XM_IM;
6333
6334//===----------------------------------------------------------------------===//
6335// 11.16. Vector Integer Move Instructions
6336//===----------------------------------------------------------------------===//
6337defm PseudoVMV_V : VPseudoUnaryVMV_V_X_I;
6338
6339//===----------------------------------------------------------------------===//
6340// 12. Vector Fixed-Point Arithmetic Instructions
6341//===----------------------------------------------------------------------===//
6342
6343//===----------------------------------------------------------------------===//
6344// 12.1. Vector Single-Width Saturating Add and Subtract
6345//===----------------------------------------------------------------------===//
6346let Defs = [VXSAT] in {
6347  defm PseudoVSADDU : VPseudoVSALU_VV_VX_VI<Commutable=1>;
6348  defm PseudoVSADD  : VPseudoVSALU_VV_VX_VI<Commutable=1>;
6349  defm PseudoVSSUBU : VPseudoVSALU_VV_VX;
6350  defm PseudoVSSUB  : VPseudoVSALU_VV_VX;
6351}
6352
6353//===----------------------------------------------------------------------===//
6354// 12.2. Vector Single-Width Averaging Add and Subtract
6355//===----------------------------------------------------------------------===//
6356defm PseudoVAADDU : VPseudoVAALU_VV_VX_RM<Commutable=1>;
6357defm PseudoVAADD  : VPseudoVAALU_VV_VX_RM<Commutable=1>;
6358defm PseudoVASUBU : VPseudoVAALU_VV_VX_RM;
6359defm PseudoVASUB  : VPseudoVAALU_VV_VX_RM;
6360
6361//===----------------------------------------------------------------------===//
6362// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
6363//===----------------------------------------------------------------------===//
6364let Defs = [VXSAT] in {
6365  defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
6366}
6367
6368//===----------------------------------------------------------------------===//
6369// 12.4. Vector Single-Width Scaling Shift Instructions
6370//===----------------------------------------------------------------------===//
6371defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM;
6372defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM;
6373
6374//===----------------------------------------------------------------------===//
6375// 12.5. Vector Narrowing Fixed-Point Clip Instructions
6376//===----------------------------------------------------------------------===//
6377let Defs = [VXSAT] in {
6378  defm PseudoVNCLIP  : VPseudoVNCLP_WV_WX_WI_RM;
6379  defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
6380}
6381
6382} // Predicates = [HasVInstructions]
6383
6384//===----------------------------------------------------------------------===//
6385// 13. Vector Floating-Point Instructions
6386//===----------------------------------------------------------------------===//
6387
6388let Predicates = [HasVInstructionsAnyF] in {
6389//===----------------------------------------------------------------------===//
6390// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
6391//===----------------------------------------------------------------------===//
6392let mayRaiseFPException = true in {
6393defm PseudoVFADD  : VPseudoVALU_VV_VF_RM;
6394defm PseudoVFSUB  : VPseudoVALU_VV_VF_RM;
6395defm PseudoVFRSUB : VPseudoVALU_VF_RM;
6396}
6397
6398//===----------------------------------------------------------------------===//
6399// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
6400//===----------------------------------------------------------------------===//
6401let mayRaiseFPException = true, hasSideEffects = 0 in {
6402defm PseudoVFWADD : VPseudoVFWALU_VV_VF_RM;
6403defm PseudoVFWSUB : VPseudoVFWALU_VV_VF_RM;
6404defm PseudoVFWADD : VPseudoVFWALU_WV_WF_RM;
6405defm PseudoVFWSUB : VPseudoVFWALU_WV_WF_RM;
6406}
6407
6408//===----------------------------------------------------------------------===//
6409// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
6410//===----------------------------------------------------------------------===//
6411let mayRaiseFPException = true, hasSideEffects = 0 in {
6412defm PseudoVFMUL  : VPseudoVFMUL_VV_VF_RM;
6413defm PseudoVFDIV  : VPseudoVFDIV_VV_VF_RM;
6414defm PseudoVFRDIV : VPseudoVFRDIV_VF_RM;
6415}
6416
6417//===----------------------------------------------------------------------===//
6418// 13.5. Vector Widening Floating-Point Multiply
6419//===----------------------------------------------------------------------===//
6420let mayRaiseFPException = true, hasSideEffects = 0 in {
6421defm PseudoVFWMUL : VPseudoVWMUL_VV_VF_RM;
6422}
6423
6424//===----------------------------------------------------------------------===//
6425// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
6426//===----------------------------------------------------------------------===//
6427let mayRaiseFPException = true, hasSideEffects = 0 in {
6428defm PseudoVFMACC  : VPseudoVMAC_VV_VF_AAXA_RM;
6429defm PseudoVFNMACC : VPseudoVMAC_VV_VF_AAXA_RM;
6430defm PseudoVFMSAC  : VPseudoVMAC_VV_VF_AAXA_RM;
6431defm PseudoVFNMSAC : VPseudoVMAC_VV_VF_AAXA_RM;
6432defm PseudoVFMADD  : VPseudoVMAC_VV_VF_AAXA_RM;
6433defm PseudoVFNMADD : VPseudoVMAC_VV_VF_AAXA_RM;
6434defm PseudoVFMSUB  : VPseudoVMAC_VV_VF_AAXA_RM;
6435defm PseudoVFNMSUB : VPseudoVMAC_VV_VF_AAXA_RM;
6436}
6437
6438//===----------------------------------------------------------------------===//
6439// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
6440//===----------------------------------------------------------------------===//
6441let mayRaiseFPException = true, hasSideEffects = 0 in {
6442defm PseudoVFWMACC  : VPseudoVWMAC_VV_VF_RM;
6443defm PseudoVFWNMACC : VPseudoVWMAC_VV_VF_RM;
6444defm PseudoVFWMSAC  : VPseudoVWMAC_VV_VF_RM;
6445defm PseudoVFWNMSAC : VPseudoVWMAC_VV_VF_RM;
6446let Predicates = [HasStdExtZvfbfwma] in
6447defm PseudoVFWMACCBF16  : VPseudoVWMAC_VV_VF_BF_RM;
6448}
6449
6450//===----------------------------------------------------------------------===//
6451// 13.8. Vector Floating-Point Square-Root Instruction
6452//===----------------------------------------------------------------------===//
6453let mayRaiseFPException = true, hasSideEffects = 0 in
6454defm PseudoVFSQRT : VPseudoVSQR_V_RM;
6455
6456//===----------------------------------------------------------------------===//
6457// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
6458//===----------------------------------------------------------------------===//
6459let mayRaiseFPException = true in
6460defm PseudoVFRSQRT7 : VPseudoVRCP_V;
6461
6462//===----------------------------------------------------------------------===//
6463// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
6464//===----------------------------------------------------------------------===//
6465let mayRaiseFPException = true, hasSideEffects = 0 in
6466defm PseudoVFREC7 : VPseudoVRCP_V_RM;
6467
6468//===----------------------------------------------------------------------===//
6469// 13.11. Vector Floating-Point Min/Max Instructions
6470//===----------------------------------------------------------------------===//
6471let mayRaiseFPException = true in {
6472defm PseudoVFMIN : VPseudoVMAX_VV_VF;
6473defm PseudoVFMAX : VPseudoVMAX_VV_VF;
6474}
6475
6476//===----------------------------------------------------------------------===//
6477// 13.12. Vector Floating-Point Sign-Injection Instructions
6478//===----------------------------------------------------------------------===//
6479defm PseudoVFSGNJ  : VPseudoVSGNJ_VV_VF;
6480defm PseudoVFSGNJN : VPseudoVSGNJ_VV_VF;
6481defm PseudoVFSGNJX : VPseudoVSGNJ_VV_VF;
6482
6483//===----------------------------------------------------------------------===//
6484// 13.13. Vector Floating-Point Compare Instructions
6485//===----------------------------------------------------------------------===//
6486let mayRaiseFPException = true in {
6487defm PseudoVMFEQ : VPseudoVCMPM_VV_VF;
6488defm PseudoVMFNE : VPseudoVCMPM_VV_VF;
6489defm PseudoVMFLT : VPseudoVCMPM_VV_VF;
6490defm PseudoVMFLE : VPseudoVCMPM_VV_VF;
6491defm PseudoVMFGT : VPseudoVCMPM_VF;
6492defm PseudoVMFGE : VPseudoVCMPM_VF;
6493}
6494
6495//===----------------------------------------------------------------------===//
6496// 13.14. Vector Floating-Point Classify Instruction
6497//===----------------------------------------------------------------------===//
6498defm PseudoVFCLASS : VPseudoVCLS_V;
6499
6500//===----------------------------------------------------------------------===//
6501// 13.15. Vector Floating-Point Merge Instruction
6502//===----------------------------------------------------------------------===//
6503defm PseudoVFMERGE : VPseudoVMRG_FM;
6504
6505//===----------------------------------------------------------------------===//
6506// 13.16. Vector Floating-Point Move Instruction
6507//===----------------------------------------------------------------------===//
6508let isReMaterializable = 1 in
6509defm PseudoVFMV_V : VPseudoVMV_F;
6510
6511//===----------------------------------------------------------------------===//
6512// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
6513//===----------------------------------------------------------------------===//
6514let mayRaiseFPException = true in {
6515let hasSideEffects = 0 in {
6516defm PseudoVFCVT_XU_F : VPseudoVCVTI_V_RM;
6517defm PseudoVFCVT_X_F : VPseudoVCVTI_V_RM;
6518}
6519
6520defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
6521defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
6522
6523defm PseudoVFROUND_NOEXCEPT : VPseudoVFROUND_NOEXCEPT_V;
6524let hasSideEffects = 0 in {
6525defm PseudoVFCVT_F_XU : VPseudoVCVTF_V_RM;
6526defm PseudoVFCVT_F_X : VPseudoVCVTF_V_RM;
6527}
6528} // mayRaiseFPException = true
6529
6530//===----------------------------------------------------------------------===//
6531// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
6532//===----------------------------------------------------------------------===//
6533let mayRaiseFPException = true in {
6534let hasSideEffects = 0 in {
6535defm PseudoVFWCVT_XU_F     : VPseudoVWCVTI_V_RM;
6536defm PseudoVFWCVT_X_F      : VPseudoVWCVTI_V_RM;
6537}
6538
6539defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
6540defm PseudoVFWCVT_RTZ_X_F  : VPseudoVWCVTI_V;
6541
6542defm PseudoVFWCVT_F_XU     : VPseudoVWCVTF_V;
6543defm PseudoVFWCVT_F_X      : VPseudoVWCVTF_V;
6544
6545defm PseudoVFWCVT_F_F      : VPseudoVWCVTD_V;
6546defm PseudoVFWCVTBF16_F_F :  VPseudoVWCVTD_V;
6547} // mayRaiseFPException = true
6548
6549//===----------------------------------------------------------------------===//
6550// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
6551//===----------------------------------------------------------------------===//
6552let mayRaiseFPException = true in {
6553let hasSideEffects = 0 in {
6554defm PseudoVFNCVT_XU_F     : VPseudoVNCVTI_W_RM;
6555defm PseudoVFNCVT_X_F      : VPseudoVNCVTI_W_RM;
6556}
6557
6558defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W;
6559defm PseudoVFNCVT_RTZ_X_F  : VPseudoVNCVTI_W;
6560
6561let hasSideEffects = 0 in {
6562defm PseudoVFNCVT_F_XU     : VPseudoVNCVTF_W_RM;
6563defm PseudoVFNCVT_F_X      : VPseudoVNCVTF_W_RM;
6564}
6565
6566let hasSideEffects = 0 in {
6567defm PseudoVFNCVT_F_F      : VPseudoVNCVTD_W_RM;
6568defm PseudoVFNCVTBF16_F_F :  VPseudoVNCVTD_W_RM;
6569}
6570
6571defm PseudoVFNCVT_ROD_F_F  : VPseudoVNCVTD_W;
6572} // mayRaiseFPException = true
6573} // Predicates = [HasVInstructionsAnyF]
6574
6575//===----------------------------------------------------------------------===//
6576// 14. Vector Reduction Operations
6577//===----------------------------------------------------------------------===//
6578
6579let Predicates = [HasVInstructions] in {
6580//===----------------------------------------------------------------------===//
6581// 14.1. Vector Single-Width Integer Reduction Instructions
6582//===----------------------------------------------------------------------===//
6583defm PseudoVREDSUM  : VPseudoVRED_VS;
6584defm PseudoVREDAND  : VPseudoVRED_VS;
6585defm PseudoVREDOR   : VPseudoVRED_VS;
6586defm PseudoVREDXOR  : VPseudoVRED_VS;
6587defm PseudoVREDMINU : VPseudoVREDMINMAX_VS;
6588defm PseudoVREDMIN  : VPseudoVREDMINMAX_VS;
6589defm PseudoVREDMAXU : VPseudoVREDMINMAX_VS;
6590defm PseudoVREDMAX  : VPseudoVREDMINMAX_VS;
6591
6592//===----------------------------------------------------------------------===//
6593// 14.2. Vector Widening Integer Reduction Instructions
6594//===----------------------------------------------------------------------===//
6595let IsRVVWideningReduction = 1 in {
6596defm PseudoVWREDSUMU   : VPseudoVWRED_VS;
6597defm PseudoVWREDSUM    : VPseudoVWRED_VS;
6598}
6599} // Predicates = [HasVInstructions]
6600
6601let Predicates = [HasVInstructionsAnyF] in {
6602//===----------------------------------------------------------------------===//
6603// 14.3. Vector Single-Width Floating-Point Reduction Instructions
6604//===----------------------------------------------------------------------===//
6605let mayRaiseFPException = true, hasSideEffects = 0 in {
6606defm PseudoVFREDOSUM : VPseudoVFREDO_VS_RM;
6607defm PseudoVFREDUSUM : VPseudoVFRED_VS_RM;
6608}
6609let mayRaiseFPException = true in {
6610defm PseudoVFREDMIN  : VPseudoVFREDMINMAX_VS;
6611defm PseudoVFREDMAX  : VPseudoVFREDMINMAX_VS;
6612}
6613
6614//===----------------------------------------------------------------------===//
6615// 14.4. Vector Widening Floating-Point Reduction Instructions
6616//===----------------------------------------------------------------------===//
6617let IsRVVWideningReduction = 1, hasSideEffects = 0, mayRaiseFPException = true in {
6618defm PseudoVFWREDUSUM  : VPseudoVFWRED_VS_RM;
6619defm PseudoVFWREDOSUM  : VPseudoVFWREDO_VS_RM;
6620}
6621
6622} // Predicates = [HasVInstructionsAnyF]
6623
6624//===----------------------------------------------------------------------===//
6625// 15. Vector Mask Instructions
6626//===----------------------------------------------------------------------===//
6627
6628let Predicates = [HasVInstructions] in {
6629//===----------------------------------------------------------------------===//
6630// 15.1 Vector Mask-Register Logical Instructions
6631//===----------------------------------------------------------------------===//
6632
6633defm PseudoVMAND: VPseudoVALU_MM<Commutable=1>;
6634defm PseudoVMNAND: VPseudoVALU_MM<Commutable=1>;
6635defm PseudoVMANDN: VPseudoVALU_MM;
6636defm PseudoVMXOR: VPseudoVALU_MM<Commutable=1>;
6637defm PseudoVMOR: VPseudoVALU_MM<Commutable=1>;
6638defm PseudoVMNOR: VPseudoVALU_MM<Commutable=1>;
6639defm PseudoVMORN: VPseudoVALU_MM;
6640defm PseudoVMXNOR: VPseudoVALU_MM<Commutable=1>;
6641
6642// Pseudo instructions
6643defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
6644defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
6645
6646//===----------------------------------------------------------------------===//
6647// 15.2. Vector mask population count vcpop
6648//===----------------------------------------------------------------------===//
6649let IsSignExtendingOpW = 1 in
6650defm PseudoVCPOP: VPseudoVPOP_M;
6651
6652//===----------------------------------------------------------------------===//
6653// 15.3. vfirst find-first-set mask bit
6654//===----------------------------------------------------------------------===//
6655
6656let IsSignExtendingOpW = 1 in
6657defm PseudoVFIRST: VPseudoV1ST_M;
6658
6659//===----------------------------------------------------------------------===//
6660// 15.4. vmsbf.m set-before-first mask bit
6661//===----------------------------------------------------------------------===//
6662defm PseudoVMSBF: VPseudoVSFS_M;
6663
6664//===----------------------------------------------------------------------===//
6665// 15.5. vmsif.m set-including-first mask bit
6666//===----------------------------------------------------------------------===//
6667defm PseudoVMSIF: VPseudoVSFS_M;
6668
6669//===----------------------------------------------------------------------===//
6670// 15.6. vmsof.m set-only-first mask bit
6671//===----------------------------------------------------------------------===//
6672defm PseudoVMSOF: VPseudoVSFS_M;
6673
6674//===----------------------------------------------------------------------===//
6675// 15.8.  Vector Iota Instruction
6676//===----------------------------------------------------------------------===//
6677defm PseudoVIOTA_M: VPseudoVIOTA_M;
6678
6679//===----------------------------------------------------------------------===//
6680// 15.9. Vector Element Index Instruction
6681//===----------------------------------------------------------------------===//
6682let isReMaterializable = 1 in
6683defm PseudoVID : VPseudoVID_V;
6684} // Predicates = [HasVInstructions]
6685
6686//===----------------------------------------------------------------------===//
6687// 16. Vector Permutation Instructions
6688//===----------------------------------------------------------------------===//
6689
6690//===----------------------------------------------------------------------===//
6691// 16.1. Integer Scalar Move Instructions
6692//===----------------------------------------------------------------------===//
6693
6694let Predicates = [HasVInstructions] in {
6695let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
6696  let HasSEWOp = 1, BaseInstr = VMV_X_S in
6697  def PseudoVMV_X_S:
6698    Pseudo<(outs GPR:$rd), (ins VR:$rs2, sew:$sew), []>,
6699    Sched<[WriteVMovXS, ReadVMovXS]>,
6700    RISCVVPseudo;
6701  let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, isReMaterializable = 1,
6702      Constraints = "$rd = $rs1" in
6703  def PseudoVMV_S_X: Pseudo<(outs VR:$rd),
6704                            (ins VR:$rs1, GPR:$rs2, AVL:$vl, sew:$sew),
6705                            []>,
6706    Sched<[WriteVMovSX, ReadVMovSX_V, ReadVMovSX_X]>,
6707    RISCVVPseudo;
6708}
6709} // Predicates = [HasVInstructions]
6710
6711//===----------------------------------------------------------------------===//
6712// 16.2. Floating-Point Scalar Move Instructions
6713//===----------------------------------------------------------------------===//
6714
6715let Predicates = [HasVInstructionsAnyF] in {
6716let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
6717  foreach f = FPList in {
6718    let HasSEWOp = 1, BaseInstr = VFMV_F_S in
6719    def "PseudoVFMV_" # f.FX # "_S" :
6720      Pseudo<(outs f.fprclass:$rd),
6721             (ins VR:$rs2, sew:$sew), []>,
6722      Sched<[WriteVMovFS, ReadVMovFS]>,
6723      RISCVVPseudo;
6724    let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1,
6725        Constraints = "$rd = $rs1" in
6726    def "PseudoVFMV_S_" # f.FX :
6727      Pseudo<(outs VR:$rd),
6728             (ins VR:$rs1, f.fprclass:$rs2, AVL:$vl, sew:$sew),
6729             []>,
6730      Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>,
6731      RISCVVPseudo;
6732  }
6733}
6734} // Predicates = [HasVInstructionsAnyF]
6735
6736//===----------------------------------------------------------------------===//
6737// 16.3. Vector Slide Instructions
6738//===----------------------------------------------------------------------===//
6739let Predicates = [HasVInstructions] in {
6740  defm PseudoVSLIDEUP    : VPseudoVSLD_VX_VI</*slidesUp=*/true, "@earlyclobber $rd">;
6741  defm PseudoVSLIDEDOWN  : VPseudoVSLD_VX_VI</*slidesUp=*/false>;
6742  defm PseudoVSLIDE1UP   : VPseudoVSLD1_VX<"@earlyclobber $rd">;
6743  defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
6744} // Predicates = [HasVInstructions]
6745
6746let Predicates = [HasVInstructionsAnyF] in {
6747  defm PseudoVFSLIDE1UP  : VPseudoVSLD1_VF<"@earlyclobber $rd">;
6748  defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
6749} // Predicates = [HasVInstructionsAnyF]
6750
6751//===----------------------------------------------------------------------===//
6752// 16.4. Vector Register Gather Instructions
6753//===----------------------------------------------------------------------===//
6754let Predicates = [HasVInstructions] in {
6755defm PseudoVRGATHER     : VPseudoVGTR_VV_VX_VI;
6756defm PseudoVRGATHEREI16 : VPseudoVGTR_EI16_VV;
6757
6758//===----------------------------------------------------------------------===//
6759// 16.5. Vector Compress Instruction
6760//===----------------------------------------------------------------------===//
6761defm PseudoVCOMPRESS : VPseudoVCPR_V;
6762} // Predicates = [HasVInstructions]
6763
6764//===----------------------------------------------------------------------===//
6765// Patterns.
6766//===----------------------------------------------------------------------===//
6767
6768//===----------------------------------------------------------------------===//
6769// 11. Vector Integer Arithmetic Instructions
6770//===----------------------------------------------------------------------===//
6771
6772//===----------------------------------------------------------------------===//
6773// 11.1. Vector Single-Width Integer Add and Subtract
6774//===----------------------------------------------------------------------===//
6775defm : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
6776defm : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
6777defm : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
6778
6779//===----------------------------------------------------------------------===//
6780// 11.2. Vector Widening Integer Add/Subtract
6781//===----------------------------------------------------------------------===//
6782defm : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
6783defm : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
6784defm : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
6785defm : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
6786defm : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
6787defm : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
6788defm : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
6789defm : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;
6790
6791//===----------------------------------------------------------------------===//
6792// 11.3. Vector Integer Extension
6793//===----------------------------------------------------------------------===//
6794defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
6795                     AllFractionableVF2IntVectors>;
6796defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
6797                     AllFractionableVF4IntVectors>;
6798defm : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
6799                     AllFractionableVF8IntVectors>;
6800defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
6801                     AllFractionableVF2IntVectors>;
6802defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
6803                     AllFractionableVF4IntVectors>;
6804defm : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
6805                     AllFractionableVF8IntVectors>;
6806
6807//===----------------------------------------------------------------------===//
6808// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
6809//===----------------------------------------------------------------------===//
6810defm : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
6811defm : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
6812defm : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
6813
6814defm : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
6815defm : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
6816defm : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
6817
6818//===----------------------------------------------------------------------===//
6819// 11.5. Vector Bitwise Logical Instructions
6820//===----------------------------------------------------------------------===//
6821defm : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
6822defm : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
6823defm : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;
6824
6825//===----------------------------------------------------------------------===//
6826// 11.6. Vector Single-Width Bit Shift Instructions
6827//===----------------------------------------------------------------------===//
6828defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
6829                            uimm5>;
6830defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
6831                            uimm5>;
6832defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
6833                            uimm5>;
6834
6835foreach vti = AllIntegerVectors in {
6836  // Emit shift by 1 as an add since it might be faster.
6837  let Predicates = GetVTypePredicates<vti>.Predicates in {
6838    def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$passthru),
6839                                          (vti.Vector vti.RegClass:$rs1),
6840                                          (XLenVT 1), VLOpFrag)),
6841              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
6842                 vti.RegClass:$passthru, vti.RegClass:$rs1,
6843                 vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
6844    def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$passthru),
6845                                               (vti.Vector vti.RegClass:$rs1),
6846                                               (XLenVT 1),
6847                                               (vti.Mask V0),
6848                                               VLOpFrag,
6849                                               (XLenVT timm:$policy))),
6850              (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
6851                                                          vti.RegClass:$passthru,
6852                                                          vti.RegClass:$rs1,
6853                                                          vti.RegClass:$rs1,
6854                                                          (vti.Mask V0),
6855                                                          GPR:$vl,
6856                                                          vti.Log2SEW,
6857                                                          (XLenVT timm:$policy))>;
6858  }
6859}
6860
6861//===----------------------------------------------------------------------===//
6862// 11.7. Vector Narrowing Integer Right Shift Instructions
6863//===----------------------------------------------------------------------===//
6864defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
6865defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
6866
6867//===----------------------------------------------------------------------===//
6868// 11.8. Vector Integer Comparison Instructions
6869//===----------------------------------------------------------------------===//
6870defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
6871defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
6872defm : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
6873defm : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
6874defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
6875defm : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
6876
6877defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
6878defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
6879
6880// Match vmsgt with 2 vector operands to vmslt with the operands swapped.
6881defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
6882defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
6883
6884defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
6885defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
6886
6887// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16 and
6888// non-zero. Zero can be .vx with x0. This avoids the user needing to know that
6889// there is no vmslt(u).vi instruction. Similar for vmsge(u).vx intrinsics
6890// using vmslt(u).vi.
6891defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE", simm5_plus1_nonzero>;
6892defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
6893
6894// We need to handle 0 for vmsge.vi using vmslt.vi because there is no vmsge.vx.
6895defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT", simm5_plus1>;
6896defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
6897
6898//===----------------------------------------------------------------------===//
6899// 11.9. Vector Integer Min/Max Instructions
6900//===----------------------------------------------------------------------===//
6901defm : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
6902defm : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
6903defm : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
6904defm : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
6905
6906//===----------------------------------------------------------------------===//
6907// 11.10. Vector Single-Width Integer Multiply Instructions
6908//===----------------------------------------------------------------------===//
6909defm : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
6910
6911defvar IntegerVectorsExceptI64 = !filter(vti, AllIntegerVectors,
6912                                         !ne(vti.SEW, 64));
6913defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
6914                         IntegerVectorsExceptI64>;
6915defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
6916                         IntegerVectorsExceptI64>;
6917defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
6918                         IntegerVectorsExceptI64>;
6919
6920// vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
6921defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
6922let Predicates = [HasVInstructionsFullMultiply] in {
6923  defm : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH",
6924                           I64IntegerVectors>;
6925  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU",
6926                           I64IntegerVectors>;
6927  defm : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU",
6928                           I64IntegerVectors>;
6929}
6930
6931//===----------------------------------------------------------------------===//
6932// 11.11. Vector Integer Divide Instructions
6933//===----------------------------------------------------------------------===//
6934defm : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors, isSEWAware=1>;
6935defm : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors, isSEWAware=1>;
6936defm : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors, isSEWAware=1>;
6937defm : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors, isSEWAware=1>;
6938
6939//===----------------------------------------------------------------------===//
6940// 11.12. Vector Widening Integer Multiply Instructions
6941//===----------------------------------------------------------------------===//
6942defm : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
6943defm : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
6944defm : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;
6945
6946//===----------------------------------------------------------------------===//
6947// 11.13. Vector Single-Width Integer Multiply-Add Instructions
6948//===----------------------------------------------------------------------===//
6949defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
6950defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
6951defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
6952defm : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
6953
6954//===----------------------------------------------------------------------===//
6955// 11.14. Vector Widening Integer Multiply-Add Instructions
6956//===----------------------------------------------------------------------===//
6957defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
6958defm : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
6959defm : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
6960defm : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
6961
6962//===----------------------------------------------------------------------===//
6963// 11.15. Vector Integer Merge Instructions
6964//===----------------------------------------------------------------------===//
6965defm : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
6966
6967//===----------------------------------------------------------------------===//
6968// 11.16. Vector Integer Move Instructions
6969//===----------------------------------------------------------------------===//
6970foreach vti = AllVectors in {
6971  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
6972                       GetVTypePredicates<vti>.Predicates) in {
6973    def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$passthru),
6974                                             (vti.Vector vti.RegClass:$rs1),
6975                                             VLOpFrag)),
6976              (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
6977               $passthru, $rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
6978
6979    // vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
6980  }
6981}
6982
6983//===----------------------------------------------------------------------===//
6984// 12. Vector Fixed-Point Arithmetic Instructions
6985//===----------------------------------------------------------------------===//
6986
6987//===----------------------------------------------------------------------===//
6988// 12.1. Vector Single-Width Saturating Add and Subtract
6989//===----------------------------------------------------------------------===//
6990defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
6991defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
6992defm : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
6993defm : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
6994
6995//===----------------------------------------------------------------------===//
6996// 12.2. Vector Single-Width Averaging Add and Subtract
6997//===----------------------------------------------------------------------===//
6998defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaaddu", "PseudoVAADDU",
6999                            AllIntegerVectors>;
7000defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasubu", "PseudoVASUBU",
7001                            AllIntegerVectors>;
7002defm : VPatBinaryV_VV_VX_RM<"int_riscv_vasub", "PseudoVASUB",
7003                            AllIntegerVectors>;
7004defm : VPatBinaryV_VV_VX_RM<"int_riscv_vaadd", "PseudoVAADD",
7005                            AllIntegerVectors>;
7006
7007//===----------------------------------------------------------------------===//
7008// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
7009//===----------------------------------------------------------------------===//
7010defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7011                             IntegerVectorsExceptI64>;
7012// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
7013let Predicates = [HasVInstructionsFullMultiply] in
7014defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
7015                             I64IntegerVectors>;
7016
7017//===----------------------------------------------------------------------===//
7018// 12.4. Vector Single-Width Scaling Shift Instructions
7019//===----------------------------------------------------------------------===//
7020defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL",
7021                               AllIntegerVectors, uimm5>;
7022defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA",
7023                               AllIntegerVectors, uimm5>;
7024
7025//===----------------------------------------------------------------------===//
7026// 12.5. Vector Narrowing Fixed-Point Clip Instructions
7027//===----------------------------------------------------------------------===//
7028defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU",
7029                               AllWidenableIntVectors>;
7030defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
7031                               AllWidenableIntVectors>;
7032
7033//===----------------------------------------------------------------------===//
7034// 13. Vector Floating-Point Instructions
7035//===----------------------------------------------------------------------===//
7036
7037//===----------------------------------------------------------------------===//
7038// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
7039//===----------------------------------------------------------------------===//
7040defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors,
7041                            isSEWAware = 1>;
7042defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors,
7043                            isSEWAware = 1>;
7044defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors,
7045                         isSEWAware = 1>;
7046
7047//===----------------------------------------------------------------------===//
7048// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
7049//===----------------------------------------------------------------------===//
7050defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD",
7051                            AllWidenableFloatVectors, isSEWAware=1>;
7052defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB",
7053                            AllWidenableFloatVectors, isSEWAware=1>;
7054defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD",
7055                            AllWidenableFloatVectors, isSEWAware=1>;
7056defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB",
7057                            AllWidenableFloatVectors, isSEWAware=1>;
7058
7059//===----------------------------------------------------------------------===//
7060// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
7061//===----------------------------------------------------------------------===//
7062defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL",
7063                            AllFloatVectors, isSEWAware=1>;
7064defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfdiv", "PseudoVFDIV",
7065                            AllFloatVectors, isSEWAware=1>;
7066defm : VPatBinaryV_VX_RM<"int_riscv_vfrdiv", "PseudoVFRDIV",
7067                         AllFloatVectors, isSEWAware=1>;
7068
7069//===----------------------------------------------------------------------===//
7070// 13.5. Vector Widening Floating-Point Multiply
7071//===----------------------------------------------------------------------===//
7072defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL",
7073                            AllWidenableFloatVectors, isSEWAware=1>;
7074
7075//===----------------------------------------------------------------------===//
7076// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
7077//===----------------------------------------------------------------------===//
7078defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC",
7079                                  AllFloatVectors, isSEWAware=1>;
7080defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC",
7081                                  AllFloatVectors, isSEWAware=1>;
7082defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC",
7083                                  AllFloatVectors, isSEWAware=1>;
7084defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC",
7085                                  AllFloatVectors, isSEWAware=1>;
7086defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD",
7087                                  AllFloatVectors, isSEWAware=1>;
7088defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD",
7089                                  AllFloatVectors, isSEWAware=1>;
7090defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB",
7091                                  AllFloatVectors, isSEWAware=1>;
7092defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB",
7093                                  AllFloatVectors, isSEWAware=1>;
7094
7095//===----------------------------------------------------------------------===//
7096// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
7097//===----------------------------------------------------------------------===//
7098defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC",
7099                             AllWidenableFloatVectors, isSEWAware=1>;
7100defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC",
7101                             AllWidenableFloatVectors, isSEWAware=1>;
7102defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC",
7103                             AllWidenableFloatVectors, isSEWAware=1>;
7104defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC",
7105                             AllWidenableFloatVectors, isSEWAware=1>;
7106let Predicates = [HasStdExtZvfbfwma] in
7107defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmaccbf16", "PseudoVFWMACCBF16",
7108                              AllWidenableBFloatToFloatVectors, isSEWAware=1>;
7109
7110//===----------------------------------------------------------------------===//
7111// 13.8. Vector Floating-Point Square-Root Instruction
7112//===----------------------------------------------------------------------===//
7113defm : VPatUnaryV_V_RM<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors, isSEWAware=1>;
7114
7115//===----------------------------------------------------------------------===//
7116// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
7117//===----------------------------------------------------------------------===//
7118defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors, isSEWAware=1>;
7119
7120//===----------------------------------------------------------------------===//
7121// 13.10. Vector Floating-Point Reciprocal Estimate Instruction
7122//===----------------------------------------------------------------------===//
7123defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors, isSEWAware=1>;
7124
7125//===----------------------------------------------------------------------===//
7126// 13.11. Vector Floating-Point Min/Max Instructions
7127//===----------------------------------------------------------------------===//
7128defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors,
7129                         isSEWAware=1>;
7130defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors,
7131                         isSEWAware=1>;
7132
7133//===----------------------------------------------------------------------===//
7134// 13.12. Vector Floating-Point Sign-Injection Instructions
7135//===----------------------------------------------------------------------===//
7136defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors,
7137                         isSEWAware=1>;
7138defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors,
7139                         isSEWAware=1>;
7140defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors,
7141                         isSEWAware=1>;
7142
7143//===----------------------------------------------------------------------===//
7144// 13.13. Vector Floating-Point Compare Instructions
7145//===----------------------------------------------------------------------===//
7146defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
7147defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
7148defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
7149defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
7150defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
7151defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
7152defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>;
7153defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
7154
7155//===----------------------------------------------------------------------===//
7156// 13.14. Vector Floating-Point Classify Instruction
7157//===----------------------------------------------------------------------===//
7158defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
7159
7160//===----------------------------------------------------------------------===//
7161// 13.15. Vector Floating-Point Merge Instruction
7162//===----------------------------------------------------------------------===//
7163// We can use vmerge.vvm to support vector-vector vfmerge.
7164// NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses
7165// int_riscv_vmerge. Support both for compatibility.
7166foreach vti = AllFloatVectors in {
7167  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
7168                       GetVTypePredicates<vti>.Predicates) in
7169    defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
7170                                 vti.Vector,
7171                                 vti.Vector, vti.Vector, vti.Mask,
7172                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7173                                 vti.RegClass, vti.RegClass>;
7174  let Predicates = GetVTypePredicates<vti>.Predicates in
7175    defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
7176                                 "V"#vti.ScalarSuffix#"M",
7177                                 vti.Vector,
7178                                 vti.Vector, vti.Scalar, vti.Mask,
7179                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7180                                 vti.RegClass, vti.ScalarRegClass>;
7181}
7182
7183foreach vti = AllBFloatVectors in
7184  let Predicates = [HasVInstructionsBF16Minimal] in
7185    defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
7186                                 vti.Vector,
7187                                 vti.Vector, vti.Vector, vti.Mask,
7188                                 vti.Log2SEW, vti.LMul, vti.RegClass,
7189                                 vti.RegClass, vti.RegClass>;
7190
7191foreach fvti = AllFloatVectors in {
7192  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
7193  let Predicates = GetVTypePredicates<fvti>.Predicates in
7194  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru),
7195                                            (fvti.Vector fvti.RegClass:$rs2),
7196                                            (fvti.Scalar (fpimm0)),
7197                                            (fvti.Mask V0), VLOpFrag)),
7198            (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
7199                   (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
7200}
7201
7202//===----------------------------------------------------------------------===//
7203// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
7204//===----------------------------------------------------------------------===//
7205defm : VPatConversionVI_VF_RTZ<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7206defm : VPatConversionVI_VF_RTZ<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7207defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
7208defm : VPatConversionVI_VF_RM<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
7209defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
7210defm : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
7211defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X",
7212                              isSEWAware=1>;
7213defm : VPatConversionVF_VI_RM<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU",
7214                              isSEWAware=1>;
7215
7216//===----------------------------------------------------------------------===//
7217// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
7218//===----------------------------------------------------------------------===//
7219defm : VPatConversionWI_VF_RTZ<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7220defm : VPatConversionWI_VF_RTZ<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7221defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
7222defm : VPatConversionWI_VF_RM<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
7223defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
7224defm : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
7225defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU",
7226                           isSEWAware=1>;
7227defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X",
7228                           isSEWAware=1>;
7229defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F",
7230                           isSEWAware=1>;
7231defm : VPatConversionWF_VF_BF<"int_riscv_vfwcvtbf16_f_f_v",
7232                              "PseudoVFWCVTBF16_F_F", isSEWAware=1>;
7233
7234//===----------------------------------------------------------------------===//
7235// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
7236//===----------------------------------------------------------------------===//
7237defm : VPatConversionVI_WF_RTZ<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7238defm : VPatConversionVI_WF_RTZ<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7239defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
7240defm : VPatConversionVI_WF_RM<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
7241defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
7242defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
7243defm : VPatConversionVF_WI_RM<"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU",
7244                              isSEWAware=1>;
7245defm : VPatConversionVF_WI_RM<"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X",
7246                              isSEWAware=1>;
7247defvar WidenableFloatVectorsExceptF16 = !filter(fvtiToFWti, AllWidenableFloatVectors,
7248                                                !ne(fvtiToFWti.Vti.Scalar, f16));
7249defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7250                           WidenableFloatVectorsExceptF16, isSEWAware=1>;
7251// Define vfncvt.f.f.w for f16 when Zvfhmin is enable.
7252defvar F16WidenableFloatVectors = !filter(fvtiToFWti, AllWidenableFloatVectors,
7253                                          !eq(fvtiToFWti.Vti.Scalar, f16));
7254let Predicates = [HasVInstructionsF16Minimal] in
7255defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
7256                           F16WidenableFloatVectors, isSEWAware=1>;
7257defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w",
7258                                 "PseudoVFNCVTBF16_F_F", isSEWAware=1>;
7259defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F",
7260                           isSEWAware=1>;
7261
7262//===----------------------------------------------------------------------===//
7263// 14. Vector Reduction Operations
7264//===----------------------------------------------------------------------===//
7265
7266//===----------------------------------------------------------------------===//
7267// 14.1. Vector Single-Width Integer Reduction Instructions
7268//===----------------------------------------------------------------------===//
7269defm : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
7270defm : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
7271defm : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
7272defm : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
7273defm : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
7274defm : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
7275defm : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
7276defm : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
7277
7278//===----------------------------------------------------------------------===//
7279// 14.2. Vector Widening Integer Reduction Instructions
7280//===----------------------------------------------------------------------===//
7281defm : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
7282defm : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
7283
7284//===----------------------------------------------------------------------===//
7285// 14.3. Vector Single-Width Floating-Point Reduction Instructions
7286//===----------------------------------------------------------------------===//
7287defm : VPatReductionV_VS_RM<"int_riscv_vfredosum", "PseudoVFREDOSUM", IsFloat=1>;
7288defm : VPatReductionV_VS_RM<"int_riscv_vfredusum", "PseudoVFREDUSUM", IsFloat=1>;
7289defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", IsFloat=1>;
7290defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", IsFloat=1>;
7291
7292//===----------------------------------------------------------------------===//
7293// 14.4. Vector Widening Floating-Point Reduction Instructions
7294//===----------------------------------------------------------------------===//
7295defm : VPatReductionW_VS_RM<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", IsFloat=1>;
7296defm : VPatReductionW_VS_RM<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", IsFloat=1>;
7297
7298//===----------------------------------------------------------------------===//
7299// 15. Vector Mask Instructions
7300//===----------------------------------------------------------------------===//
7301
7302//===----------------------------------------------------------------------===//
7303// 15.1 Vector Mask-Register Logical Instructions
7304//===----------------------------------------------------------------------===//
7305defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
7306defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
7307defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
7308defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
7309defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
7310defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
7311defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
7312defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
7313
7314// pseudo instructions
7315defm : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
7316defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
7317
7318//===----------------------------------------------------------------------===//
7319// 15.2. Vector count population in mask vcpop.m
7320//===----------------------------------------------------------------------===//
7321defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
7322
7323//===----------------------------------------------------------------------===//
7324// 15.3. vfirst find-first-set mask bit
7325//===----------------------------------------------------------------------===//
7326defm : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;
7327
7328//===----------------------------------------------------------------------===//
7329// 15.4. vmsbf.m set-before-first mask bit
7330//===----------------------------------------------------------------------===//
7331defm : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;
7332
7333//===----------------------------------------------------------------------===//
7334// 15.5. vmsif.m set-including-first mask bit
7335//===----------------------------------------------------------------------===//
7336defm : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;
7337
7338//===----------------------------------------------------------------------===//
7339// 15.6. vmsof.m set-only-first mask bit
7340//===----------------------------------------------------------------------===//
7341defm : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;
7342
7343//===----------------------------------------------------------------------===//
7344// 15.8.  Vector Iota Instruction
7345//===----------------------------------------------------------------------===//
7346defm : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;
7347
7348//===----------------------------------------------------------------------===//
7349// 15.9. Vector Element Index Instruction
7350//===----------------------------------------------------------------------===//
7351defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
7352
7353
7354//===----------------------------------------------------------------------===//
7355// 16. Vector Permutation Instructions
7356//===----------------------------------------------------------------------===//
7357
7358//===----------------------------------------------------------------------===//
7359// 16.1. Integer Scalar Move Instructions
7360//===----------------------------------------------------------------------===//
7361
7362foreach vti = NoGroupIntegerVectors in {
7363  let Predicates = GetVTypePredicates<vti>.Predicates in
7364  def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
7365            (PseudoVMV_X_S $rs2, vti.Log2SEW)>;
7366  // vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
7367}
7368
7369//===----------------------------------------------------------------------===//
7370// 16.3. Vector Slide Instructions
7371//===----------------------------------------------------------------------===//
7372defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
7373defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
7374defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
7375defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
7376
7377defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
7378defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
7379defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
7380defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
7381
7382//===----------------------------------------------------------------------===//
7383// 16.4. Vector Register Gather Instructions
7384//===----------------------------------------------------------------------===//
7385defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7386                                AllIntegerVectors, uimm5>;
7387defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7388                              eew=16, vtilist=AllIntegerVectors>;
7389
7390defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7391                                AllFloatVectorsExceptFP16, uimm5>;
7392let Predicates = [HasVInstructionsF16Minimal] in
7393  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7394                                  AllFP16Vectors, uimm5>;
7395defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
7396                                AllBFloatVectors, uimm5>;
7397defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
7398                              eew=16, vtilist=AllFloatVectors>;
7399//===----------------------------------------------------------------------===//
7400// 16.5. Vector Compress Instruction
7401//===----------------------------------------------------------------------===//
7402defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
7403defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectorsExceptFP16>;
7404let Predicates = [HasVInstructionsF16Minimal] in
7405  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFP16Vectors>;
7406defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBFloatVectors>;
7407
7408// Include the non-intrinsic ISel patterns
7409include "RISCVInstrInfoVVLPatterns.td"
7410include "RISCVInstrInfoVSDPatterns.td"
7411