//===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Declarations that describe the RISC-V register files
//===----------------------------------------------------------------------===//

let Namespace = "RISCV" in {
class RISCVReg<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

class RISCVRegWithSubRegs<bits<5> Enc, string n, list<Register> subregs,
                          list<string> alt = []>
      : RegisterWithSubRegs<n, subregs> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

class RISCVReg16<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

def sub_16 : SubRegIndex<16>;
class RISCVReg32<RISCVReg16 subreg>
  : RISCVRegWithSubRegs<subreg.HWEncoding{4-0}, subreg.AsmName, [subreg],
                        subreg.AltNames> {
  let SubRegIndices = [sub_16];
}

// Because RISCVReg64 registers have AsmNames and AltNames that alias with their
// 16/32-bit sub-registers, RISCVAsmParser will need to coerce a register number
// from a RISCVReg16/RISCVReg32 to the equivalent RISCVReg64 when appropriate.
def sub_32 : SubRegIndex<32>;
class RISCVReg64<RISCVReg32 subreg>
  : RISCVRegWithSubRegs<subreg.HWEncoding{4-0}, subreg.AsmName, [subreg],
                        subreg.AltNames> {
  let SubRegIndices = [sub_32];
}

let FallbackRegAltNameIndex = NoRegAltName in
def ABIRegAltName : RegAltNameIndex;

def sub_vrm4_0 : SubRegIndex<256>;
def sub_vrm4_1 : SubRegIndex<256, 256>;
def sub_vrm2_0 : SubRegIndex<128>;
def sub_vrm2_1 : SubRegIndex<128, 128>;
def sub_vrm2_2 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_0>;
def sub_vrm2_3 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_1>;
def sub_vrm1_0 : SubRegIndex<64>;
def sub_vrm1_1 : SubRegIndex<64, 64>;
def sub_vrm1_2 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_0>;
def sub_vrm1_3 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_1>;
def sub_vrm1_4 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_0>;
def sub_vrm1_5 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_1>;
def sub_vrm1_6 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_0>;
def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;

// GPR sizes change with HwMode.
def sub_gpr_even : SubRegIndex<32> {
  let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
                                         [SubRegRange<32>, SubRegRange<64>]>;
}
def sub_gpr_odd  : SubRegIndex<32, 32> {
  let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
                                         [SubRegRange<32, 32>, SubRegRange<64, 64>]>;
}
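// Illustrative note: on RV32 a GPR pair is 64 bits, with sub_gpr_even covering
// bits [31:0] and sub_gpr_odd bits [63:32]; on RV64 the pair is 128 bits, with
// the even half at [63:0] and the odd half at [127:64], which is what the
// SubRegRange<64, 64> (size 64 at offset 64) entry above expresses.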
} // Namespace = "RISCV"

// Integer registers
// CostPerUse is set higher for registers that may not be compressible, as they
// are not part of GPRC, the most restrictive register class used by the
// compressed instruction set. This influences the greedy register allocator
// to reduce the use of registers that can't be encoded in 16-bit instructions.

let RegAltNameIndices = [ABIRegAltName] in {
  let isConstant = true in
  def X0  : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
  let CostPerUse = [0, 1] in {
  def X1  : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
  def X2  : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
  def X3  : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
  def X4  : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>;
  def X5  : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>;
  def X6  : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>;
  def X7  : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>;
  }
  def X8  : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>;
  def X9  : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>;
  def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>;
  def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>;
  def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>;
  def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
  def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
  def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
  let CostPerUse = [0, 1] in {
  def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
  def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
  def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
  def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>;
  def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>;
  def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>;
  def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>;
  def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>;
  def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>;
  def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>;
  def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>;
  def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>;
  def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>;
  def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>;
  def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>;
  def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>;
  }
}

def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                               [i32,  i64]>;
// Allow f64 in GPR for ZDINX on RV64.
def XLenFVT : ValueTypeByHwMode<[RV64],
                                [f64]>;
def XLenPairFVT : ValueTypeByHwMode<[RV32],
                                    [f64]>;
def XLenRI : RegInfoByHwMode<
      [RV32,              RV64],
      [RegInfo<32,32,32>, RegInfo<64,64,64>]>;

class RISCVRegisterClass<list<ValueType> regTypes, int align, dag regList>
    : RegisterClass<"RISCV", regTypes, align, regList> {
  bit IsVRegClass = 0;
  int VLMul = 1;
  int NF = 1;

  let Size = !if(IsVRegClass, !mul(VLMul, NF, 64), 0);

  let TSFlags{0} = IsVRegClass;
  let TSFlags{3-1} = !logtwo(VLMul);
  let TSFlags{6-4} = !sub(NF, 1);
}
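// Worked example (illustrative): for a vector tuple class with VLMul = 4 and
// NF = 2, the fields above evaluate to
//   Size         = !mul(4, 2, 64) = 512 bits
//   TSFlags{0}   = 1              (IsVRegClass)
//   TSFlags{3-1} = !logtwo(4)     = 2
//   TSFlags{6-4} = !sub(2, 1)     = 1
// i.e. TSFlags = 0b0010101.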

class GPRRegisterClass<dag regList>
    : RISCVRegisterClass<[XLenVT, XLenFVT, i32], 32, regList> {
  let RegInfos = XLenRI;
}

// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPR : GPRRegisterClass<(add (sequence "X%u", 10, 17),
                                (sequence "X%u", 5, 7),
                                (sequence "X%u", 28, 31),
                                (sequence "X%u", 8, 9),
                                (sequence "X%u", 18, 27),
                                (sequence "X%u", 0, 4))>;

def GPRX0 : GPRRegisterClass<(add X0)>;
def GPRX1 : GPRRegisterClass<(add X1)>;
def GPRX5 : GPRRegisterClass<(add X5)>;

def GPRNoX0 : GPRRegisterClass<(sub GPR, X0)>;

def GPRNoX0X2 : GPRRegisterClass<(sub GPR, X0, X2)>;

def GPRX7 : GPRRegisterClass<(add X7)>;

// Don't use X1 or X5 for JALR since that is a hint to pop the return address
// stack on some microarchitectures. Also remove the reserved registers X0, X2,
// X3, and X4 as it reduces the number of register classes that get synthesized
// by tablegen.
def GPRJALR : GPRRegisterClass<(sub GPR, (sequence "X%u", 0, 5))>;

def GPRJALRNonX7 : GPRRegisterClass<(sub GPRJALR, X7)>;

def GPRC : GPRRegisterClass<(add (sequence "X%u", 10, 15),
                                 (sequence "X%u", 8, 9))>;

// For indirect tail calls, we can't use callee-saved registers, as they are
// restored to the saved value before the tail call, which would clobber a call
// address. We shouldn't use x5 since that is a hint to pop the return
// address stack on some microarchitectures.
def GPRTC : GPRRegisterClass<(add (sequence "X%u", 6, 7),
                                  (sequence "X%u", 10, 17),
                                  (sequence "X%u", 28, 31))>;
def GPRTCNonX7 : GPRRegisterClass<(sub GPRTC, X7)>;

def SP : GPRRegisterClass<(add X2)>;

// Saved Registers from s0 to s7, for C.MVA01S07 instruction in Zcmp extension
def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
                                 (sequence "X%u", 18, 23))>;

def GPRX1X5 : GPRRegisterClass<(add X1, X5)>;

// Floating point registers
let RegAltNameIndices = [ABIRegAltName] in {
  def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
  def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
  def F2_H  : RISCVReg16<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
  def F3_H  : RISCVReg16<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
  def F4_H  : RISCVReg16<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
  def F5_H  : RISCVReg16<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
  def F6_H  : RISCVReg16<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
  def F7_H  : RISCVReg16<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
  def F8_H  : RISCVReg16<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
  def F9_H  : RISCVReg16<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
  def F10_H : RISCVReg16<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
  def F11_H : RISCVReg16<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
  def F12_H : RISCVReg16<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
  def F13_H : RISCVReg16<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
  def F14_H : RISCVReg16<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
  def F15_H : RISCVReg16<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
  def F16_H : RISCVReg16<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
  def F17_H : RISCVReg16<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
  def F18_H : RISCVReg16<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
  def F19_H : RISCVReg16<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
  def F20_H : RISCVReg16<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
  def F21_H : RISCVReg16<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
  def F22_H : RISCVReg16<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
  def F23_H : RISCVReg16<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
  def F24_H : RISCVReg16<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
  def F25_H : RISCVReg16<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
  def F26_H : RISCVReg16<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
  def F27_H : RISCVReg16<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
  def F28_H : RISCVReg16<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
  def F29_H : RISCVReg16<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
  def F30_H : RISCVReg16<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
  def F31_H : RISCVReg16<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;

  foreach Index = 0-31 in {
    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
      DwarfRegNum<[!add(Index, 32)]>;
  }

  foreach Index = 0-31 in {
    def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
      DwarfRegNum<[!add(Index, 32)]>;
  }
}

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
// We start by allocating argument registers in reverse order since they are
// compressible.
def FPR16 : RISCVRegisterClass<[f16, bf16], 16, (add
    (sequence "F%u_H", 15, 10), // fa5-fa0
    (sequence "F%u_H", 0, 7),   // ft0-ft7
    (sequence "F%u_H", 16, 17), // fa6-fa7
    (sequence "F%u_H", 28, 31), // ft8-ft11
    (sequence "F%u_H", 8, 9),   // fs0-fs1
    (sequence "F%u_H", 18, 27)  // fs2-fs11
)>;

def FPR32 : RISCVRegisterClass<[f32], 32, (add
    (sequence "F%u_F", 15, 10),
    (sequence "F%u_F", 0, 7),
    (sequence "F%u_F", 16, 17),
    (sequence "F%u_F", 28, 31),
    (sequence "F%u_F", 8, 9),
    (sequence "F%u_F", 18, 27)
)>;

def FPR32C : RISCVRegisterClass<[f32], 32, (add
  (sequence "F%u_F", 15, 10),
  (sequence "F%u_F", 8, 9)
)>;

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR64 : RISCVRegisterClass<[f64], 64, (add
    (sequence "F%u_D", 15, 10),
    (sequence "F%u_D", 0, 7),
    (sequence "F%u_D", 16, 17),
    (sequence "F%u_D", 28, 31),
    (sequence "F%u_D", 8, 9),
    (sequence "F%u_D", 18, 27)
)>;

def FPR64C : RISCVRegisterClass<[f64], 64, (add
  (sequence "F%u_D", 15, 10),
  (sequence "F%u_D", 8, 9)
)>;

// Vector type mapping to LLVM types.
//
// The V vector extension requires that VLEN >= 128 and <= 65536.
// Additionally, the only supported ELEN values are 32 and 64,
// thus `vscale` can be defined as VLEN/64,
// allowing the same types with either ELEN value.
//
//         MF8    MF4     MF2     M1      M2      M4       M8
// i64*    N/A    N/A     N/A     nxv1i64 nxv2i64 nxv4i64  nxv8i64
// i32     N/A    N/A     nxv1i32 nxv2i32 nxv4i32 nxv8i32  nxv16i32
// i16     N/A    nxv1i16 nxv2i16 nxv4i16 nxv8i16 nxv16i16 nxv32i16
// i8      nxv1i8 nxv2i8  nxv4i8  nxv8i8  nxv16i8 nxv32i8  nxv64i8
// double* N/A    N/A     N/A     nxv1f64 nxv2f64 nxv4f64  nxv8f64
// float   N/A    N/A     nxv1f32 nxv2f32 nxv4f32 nxv8f32  nxv16f32
// half    N/A    nxv1f16 nxv2f16 nxv4f16 nxv8f16 nxv16f16 nxv32f16
// * ELEN=64
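//
// Illustrative reading of the table: with vscale = VLEN/64, a VLEN of 128
// gives vscale = 2, so the M1 type for i32, nxv2i32, holds 2 x 2 = 4 i32
// elements = 128 bits, exactly one vector register; the M8 type nxv16i32
// holds 32 elements spanning a group of eight registers.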

defvar vint8mf8_t = nxv1i8;
defvar vint8mf4_t = nxv2i8;
defvar vint8mf2_t = nxv4i8;
defvar vint8m1_t = nxv8i8;
defvar vint8m2_t = nxv16i8;
defvar vint8m4_t = nxv32i8;
defvar vint8m8_t = nxv64i8;

defvar vint16mf4_t = nxv1i16;
defvar vint16mf2_t = nxv2i16;
defvar vint16m1_t  = nxv4i16;
defvar vint16m2_t  = nxv8i16;
defvar vint16m4_t  = nxv16i16;
defvar vint16m8_t  = nxv32i16;

defvar vint32mf2_t = nxv1i32;
defvar vint32m1_t  = nxv2i32;
defvar vint32m2_t  = nxv4i32;
defvar vint32m4_t  = nxv8i32;
defvar vint32m8_t  = nxv16i32;

defvar vint64m1_t = nxv1i64;
defvar vint64m2_t = nxv2i64;
defvar vint64m4_t = nxv4i64;
defvar vint64m8_t = nxv8i64;

defvar vfloat16mf4_t = nxv1f16;
defvar vfloat16mf2_t = nxv2f16;
defvar vfloat16m1_t  = nxv4f16;
defvar vfloat16m2_t  = nxv8f16;
defvar vfloat16m4_t  = nxv16f16;
defvar vfloat16m8_t  = nxv32f16;

defvar vbfloat16mf4_t = nxv1bf16;
defvar vbfloat16mf2_t = nxv2bf16;
defvar vbfloat16m1_t  = nxv4bf16;
defvar vbfloat16m2_t  = nxv8bf16;
defvar vbfloat16m4_t  = nxv16bf16;
defvar vbfloat16m8_t  = nxv32bf16;

defvar vfloat32mf2_t = nxv1f32;
defvar vfloat32m1_t  = nxv2f32;
defvar vfloat32m2_t  = nxv4f32;
defvar vfloat32m4_t  = nxv8f32;
defvar vfloat32m8_t  = nxv16f32;

defvar vfloat64m1_t = nxv1f64;
defvar vfloat64m2_t = nxv2f64;
defvar vfloat64m4_t = nxv4f64;
defvar vfloat64m8_t = nxv8f64;

defvar vbool1_t  = nxv64i1;
defvar vbool2_t  = nxv32i1;
defvar vbool4_t  = nxv16i1;
defvar vbool8_t  = nxv8i1;
defvar vbool16_t = nxv4i1;
defvar vbool32_t = nxv2i1;
defvar vbool64_t = nxv1i1;

// There is no need to define register classes for fractional LMUL.
defvar LMULList = [1, 2, 4, 8];

//===----------------------------------------------------------------------===//
// Utility classes for segment load/store.
//===----------------------------------------------------------------------===//
// The set of legal NF for LMUL = lmul.
// LMUL <= 1, NF = 2, 3, 4, 5, 6, 7, 8
// LMUL == 2, NF = 2, 3, 4
// LMUL == 4, NF = 2
// LMUL == 8, no legal NF
class NFList<int lmul> {
  list<int> L = !cond(!eq(lmul, 8): [],
                      !eq(lmul, 4): [2],
                      !eq(lmul, 2): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}

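// For example (illustrative): NFList<2>.L evaluates to [2, 3, 4], matching the
// table above, while NFList<8>.L is empty, so no tuple classes are generated
// for LMUL = 8.
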
// Generate the list of sub-register indexes, sub_vrm<lmul>_0 through
// sub_vrm<lmul>_<nf-1>, covering an nf-field tuple.
class SubRegSet<int nf, int lmul> {
  list<SubRegIndex> L = !foldl([]<SubRegIndex>,
                               !range(0, 8),
                               AccList, i,
                               !listconcat(AccList,
                                 !if(!lt(i, nf),
                                   [!cast<SubRegIndex>("sub_vrm" # lmul # "_" # i)],
                                   [])));
}

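// For example (illustrative): SubRegSet<3, 2>.L evaluates to
// [sub_vrm2_0, sub_vrm2_1, sub_vrm2_2], the three LMUL=2 sub-register
// indices of a 3-field tuple.
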
// Collect the valid indexes into 'R' under NF and LMUL values from TUPLE_INDEX.
// When NF = 2, the valid TUPLE_INDEXes are 0 and 1.
// For example, when LMUL = 4, the potentially valid indexes are
// [8, 12, 16, 20, 24, 28, 4]. However, not all of these indexes are valid under
// NF = 2. For example, 28 is not valid under LMUL = 4, NF = 2 and TUPLE_INDEX = 0.
// The filter is
//   (tuple_index + i) x lmul <= (tuple_index x lmul) + 32 - (nf x lmul)
//
// Using TUPLE_INDEX = 0, LMUL = 4 and NF = 2 as the example,
//   i x 4 <= 24
// The class will return [8, 12, 16, 20, 24, 4].
// Using TUPLE_INDEX = 1, LMUL = 4 and NF = 2 as the example,
//   (1 + i) x 4 <= 28
// The class will return [12, 16, 20, 24, 28, 8].
//
class IndexSet<int tuple_index, int nf, int lmul, bit isV0 = false> {
  list<int> R =
    !foldl([]<int>,
              !if(isV0, [0],
                !cond(
                  !eq(lmul, 1): !listconcat(!range(8, 32), !range(1, 8)),
                  !eq(lmul, 2): !listconcat(!range(4, 16), !range(1, 4)),
                  !eq(lmul, 4): !listconcat(!range(2, 8), !range(1, 2)))),
              L, i,
              !listconcat(L,
                          !if(!le(!mul(!add(i, tuple_index), lmul),
                                  !sub(!add(32, !mul(tuple_index, lmul)), !mul(nf, lmul))),
                              [!mul(!add(i, tuple_index), lmul)], [])));
}
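// Illustrative note: when isV0 is set, the fold above starts from the single
// element [0], so R collapses to [tuple_index x lmul] and only the register
// group containing V0 is produced.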

// This class returns a list of vector register collections.
// For example, for NF = 2 and LMUL = 4,
// it will return
//   ([ V8M4, V12M4, V16M4, V20M4, V24M4, V4M4],
//    [V12M4, V16M4, V20M4, V24M4, V28M4, V8M4])
//
class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
  list<dag> L =
    !if(!ge(start, nf),
        LIn,
        !listconcat(
          [!dag(add,
                !foreach(i, IndexSet<start, nf, lmul, isV0>.R,
                  !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
                                                  !eq(lmul, 4): "M4",
                                                  true: ""))),
                !listsplat("",
                  !size(IndexSet<start, nf, lmul, isV0>.R)))],
          VRegList<LIn, !add(start, 1), nf, lmul, isV0>.L));
}

// Vector registers
foreach Index = !range(0, 32, 1) in {
  def V#Index : RISCVReg<Index, "v"#Index>, DwarfRegNum<[!add(Index, 96)]>;
}

foreach Index = !range(0, 32, 2) in {
  def V#Index#M2 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index),
                      !cast<Register>("V"#!add(Index, 1))]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm1_0, sub_vrm1_1];
  }
}

foreach Index = !range(0, 32, 4) in {
  def V#Index#M4 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index#"M2"),
                      !cast<Register>("V"#!add(Index, 2)#"M2")]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm2_0, sub_vrm2_1];
  }
}

foreach Index = !range(0, 32, 8) in {
  def V#Index#M8 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index#"M4"),
                      !cast<Register>("V"#!add(Index, 4)#"M4")]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm4_0, sub_vrm4_1];
  }
}

def VTYPE  : RISCVReg<0, "vtype">;
def VL     : RISCVReg<0, "vl">;
def VXSAT  : RISCVReg<0, "vxsat">;
def VXRM   : RISCVReg<0, "vxrm">;
let isConstant = true in
def VLENB  : RISCVReg<0, "vlenb">,
             DwarfRegNum<[!add(4096, SysRegVLENB.Encoding)]>;

def VCSR : RISCVRegisterClass<[XLenVT], 32,
                              (add VTYPE, VL, VLENB)> {
  let RegInfos = XLenRI;
  let isAllocatable = 0;
}

foreach m = [1, 2, 4] in {
  foreach n = NFList<m>.L in {
    def "VN" # n # "M" # m # "NoV0": RegisterTuples<
                                       SubRegSet<n, m>.L,
                                       VRegList<[], 0, n, m, false>.L>;
    def "VN" # n # "M" # m # "V0" : RegisterTuples<
                                       SubRegSet<n, m>.L,
                                       VRegList<[], 0, n, m, true>.L>;
  }
}
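
// For instance (illustrative): with m = 1 and n = 2 this defines VN2M1NoV0,
// whose first tuple combines V8 and V9 (per IndexSet, allocation starts at
// v8), and VN2M1V0, which contains only the single tuple starting at V0.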

class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
    : RISCVRegisterClass<regTypes,
                         64, // The maximum supported ELEN is 64.
                         regList> {
  let IsVRegClass = 1;
  let VLMul = Vlmul;
}

defvar VMaskVTs = [vbool1_t, vbool2_t, vbool4_t, vbool8_t, vbool16_t,
                   vbool32_t, vbool64_t];

defvar VM1VTs = [vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
                 vbfloat16m1_t, vfloat16m1_t, vfloat32m1_t,
                 vfloat64m1_t, vint8mf2_t, vint8mf4_t, vint8mf8_t,
                 vint16mf2_t, vint16mf4_t, vint32mf2_t,
                 vfloat16mf4_t, vfloat16mf2_t, vbfloat16mf4_t,
                 vbfloat16mf2_t, vfloat32mf2_t];

defvar VM2VTs = [vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                 vfloat16m2_t, vbfloat16m2_t,
                 vfloat32m2_t, vfloat64m2_t];

defvar VM4VTs = [vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                 vfloat16m4_t, vbfloat16m4_t,
                 vfloat32m4_t, vfloat64m4_t];

defvar VM8VTs = [vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                 vfloat16m8_t, vbfloat16m8_t,
                 vfloat32m8_t, vfloat64m8_t];

// We reverse the order of the last 8 registers so that we don't needlessly
// prevent allocation of higher LMUL register groups while still putting v0
// last in the allocation order.

def VR : VReg<!listconcat(VM1VTs, VMaskVTs),
              (add (sequence "V%u", 8, 31),
                   (sequence "V%u", 7, 0)), 1>;

def VRNoV0 : VReg<!listconcat(VM1VTs, VMaskVTs), (sub VR, V0), 1>;

def VRM2 : VReg<VM2VTs, (add (sequence "V%uM2", 8, 31, 2),
                             (sequence "V%uM2", 6, 0, 2)), 2>;

def VRM2NoV0 : VReg<VM2VTs, (sub VRM2, V0M2), 2>;

def VRM4 : VReg<VM4VTs, (add V8M4, V12M4, V16M4, V20M4,
                             V24M4, V28M4, V4M4, V0M4), 4>;

def VRM4NoV0 : VReg<VM4VTs, (sub VRM4, V0M4), 4>;

def VRM8 : VReg<VM8VTs, (add V8M8, V16M8, V24M8, V0M8), 8>;

def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;

def VMV0 : VReg<VMaskVTs, (add V0), 1>;

let RegInfos = XLenRI in {
def GPRF16  : RISCVRegisterClass<[f16], 16, (add GPR)>;
def GPRF32  : RISCVRegisterClass<[f32], 32, (add GPR)>;
} // RegInfos = XLenRI

// Dummy zero register for use in the register pair containing X0 (as X1 is
// neither read nor written when the X0 register pair is used).
def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;

// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
// register's existence from changing codegen (due to the regPressureSetLimit
// for the GPR register class being altered).
def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;

let RegAltNameIndices = [ABIRegAltName] in {
  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
                                    [X0, DUMMY_REG_PAIR_WITH_X0],
                                    X0.AltNames> {
    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
    let CoveredBySubRegs = 1;
  }
  foreach I = 1-15 in {
    defvar Index = !shl(I, 1);
    defvar IndexP1 = !add(Index, 1);
    defvar Reg = !cast<Register>("X"#Index);
    defvar RegP1 = !cast<Register>("X"#IndexP1);
    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
                                                          Reg.AsmName,
                                                          [Reg, RegP1],
                                                          Reg.AltNames> {
      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
      let CoveredBySubRegs = 1;
    }
  }
}

let RegInfos = RegInfoByHwMode<[RV32, RV64],
                               [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>,
    DecoderMethod = "DecodeGPRPairRegisterClass" in
def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
    X10_X11, X12_X13, X14_X15, X16_X17,
    X6_X7,
    X28_X29, X30_X31,
    X8_X9,
    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
    X0_Pair, X2_X3, X4_X5
)>;

// This register class is added for inline assembly of vector mask types.
def VM : VReg<VMaskVTs, (add VR), 1>;

foreach m = LMULList in {
  foreach nf = NFList<m>.L in {
    let NF = nf in {
      def "VRN" # nf # "M" # m # "NoV0"
        : VReg<[untyped],
               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0")),
               m>;
      def "VRN" # nf # "M" # m
        : VReg<[untyped],
               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0"),
                    !cast<RegisterTuples>("VN" # nf # "M" # m # "V0")),
               m>;
    }
  }
}

// Special registers
def FFLAGS : RISCVReg<0, "fflags">;
def FRM    : RISCVReg<0, "frm">;

// Shadow Stack register
def SSP    : RISCVReg<0, "ssp">;

// Dummy VCIX state register
def VCIX_STATE : RISCVReg<0, "vcix_state">;
635