//===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Declarations that describe the RISC-V register files
//===----------------------------------------------------------------------===//

let Namespace = "RISCV" in {
class RISCVReg<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

class RISCVRegWithSubRegs<bits<5> Enc, string n, list<Register> subregs,
                          list<string> alt = []>
      : RegisterWithSubRegs<n, subregs> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

class RISCVReg16<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

def sub_16 : SubRegIndex<16>;
class RISCVReg32<RISCVReg16 subreg>
  : RISCVRegWithSubRegs<subreg.HWEncoding{4-0}, subreg.AsmName, [subreg],
                        subreg.AltNames> {
  let SubRegIndices = [sub_16];
}

// Because RISCVReg64 registers have an AsmName and AltNames that alias with
// their 16/32-bit sub-registers, RISCVAsmParser will need to coerce a register
// number from a RISCVReg16/RISCVReg32 to the equivalent RISCVReg64 when
// appropriate.
def sub_32 : SubRegIndex<32>;
class RISCVReg64<RISCVReg32 subreg>
  : RISCVRegWithSubRegs<subreg.HWEncoding{4-0}, subreg.AsmName, [subreg],
                        subreg.AltNames> {
  let SubRegIndices = [sub_32];
}

let FallbackRegAltNameIndex = NoRegAltName in
def ABIRegAltName : RegAltNameIndex;

def sub_vrm4_0 : SubRegIndex<256>;
def sub_vrm4_1 : SubRegIndex<256, 256>;
def sub_vrm2_0 : SubRegIndex<128>;
def sub_vrm2_1 : SubRegIndex<128, 128>;
def sub_vrm2_2 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_0>;
def sub_vrm2_3 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_1>;
def sub_vrm1_0 : SubRegIndex<64>;
def sub_vrm1_1 : SubRegIndex<64, 64>;
def sub_vrm1_2 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_0>;
def sub_vrm1_3 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_1>;
def sub_vrm1_4 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_0>;
def sub_vrm1_5 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_1>;
def sub_vrm1_6 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_0>;
def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;

// GPR sizes change with HwMode.
def sub_gpr_even : SubRegIndex<32> {
  let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
                                         [SubRegRange<32>, SubRegRange<64>]>;
}
def sub_gpr_odd  : SubRegIndex<32, 32> {
  let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
                                         [SubRegRange<32, 32>, SubRegRange<64, 64>]>;
}
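
// For illustration: under RV64 a GPR pair is 128 bits, with sub_gpr_even
// covering bits 63:0 and sub_gpr_odd covering bits 127:64; under RV32 the
// halves are 32 bits wide at offsets 0 and 32.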
} // Namespace = "RISCV"

//===----------------------------------------------------------------------===//
// General Purpose Registers (aka Integer Registers)
//===----------------------------------------------------------------------===//

// CostPerUse is set higher for registers that may not be compressible, as they
// are not part of GPRC, the most restrictive register class used by the
// compressed instruction set. This influences the greedy register allocator
// to reduce the use of registers that can't be encoded in 16-bit instructions.

let RegAltNameIndices = [ABIRegAltName] in {
  // 16-bit sub-registers for use by Zhinx. Having a 16-bit sub-register reduces
  // the spill size for Zhinx operations.
  let isConstant = true in
  def X0_H  : RISCVReg<0, "x0", ["zero"]>;
  let CostPerUse = [0, 1] in {
  def X1_H  : RISCVReg<1, "x1", ["ra"]>;
  def X2_H  : RISCVReg<2, "x2", ["sp"]>;
  def X3_H  : RISCVReg<3, "x3", ["gp"]>;
  def X4_H  : RISCVReg<4, "x4", ["tp"]>;
  def X5_H  : RISCVReg<5, "x5", ["t0"]>;
  def X6_H  : RISCVReg<6, "x6", ["t1"]>;
  def X7_H  : RISCVReg<7, "x7", ["t2"]>;
  }
  def X8_H  : RISCVReg<8, "x8", ["s0", "fp"]>;
  def X9_H  : RISCVReg<9, "x9", ["s1"]>;
  def X10_H : RISCVReg<10,"x10", ["a0"]>;
  def X11_H : RISCVReg<11,"x11", ["a1"]>;
  def X12_H : RISCVReg<12,"x12", ["a2"]>;
  def X13_H : RISCVReg<13,"x13", ["a3"]>;
  def X14_H : RISCVReg<14,"x14", ["a4"]>;
  def X15_H : RISCVReg<15,"x15", ["a5"]>;
  let CostPerUse = [0, 1] in {
  def X16_H : RISCVReg<16,"x16", ["a6"]>;
  def X17_H : RISCVReg<17,"x17", ["a7"]>;
  def X18_H : RISCVReg<18,"x18", ["s2"]>;
  def X19_H : RISCVReg<19,"x19", ["s3"]>;
  def X20_H : RISCVReg<20,"x20", ["s4"]>;
  def X21_H : RISCVReg<21,"x21", ["s5"]>;
  def X22_H : RISCVReg<22,"x22", ["s6"]>;
  def X23_H : RISCVReg<23,"x23", ["s7"]>;
  def X24_H : RISCVReg<24,"x24", ["s8"]>;
  def X25_H : RISCVReg<25,"x25", ["s9"]>;
  def X26_H : RISCVReg<26,"x26", ["s10"]>;
  def X27_H : RISCVReg<27,"x27", ["s11"]>;
  def X28_H : RISCVReg<28,"x28", ["t3"]>;
  def X29_H : RISCVReg<29,"x29", ["t4"]>;
  def X30_H : RISCVReg<30,"x30", ["t5"]>;
  def X31_H : RISCVReg<31,"x31", ["t6"]>;
  }

  let SubRegIndices = [sub_16] in {
  let isConstant = true in
  def X0_W  : RISCVRegWithSubRegs<0, "x0", [X0_H], ["zero"]>;
  let CostPerUse = [0, 1] in {
  def X1_W  : RISCVRegWithSubRegs<1, "x1", [X1_H], ["ra"]>;
  def X2_W  : RISCVRegWithSubRegs<2, "x2", [X2_H], ["sp"]>;
  def X3_W  : RISCVRegWithSubRegs<3, "x3", [X3_H], ["gp"]>;
  def X4_W  : RISCVRegWithSubRegs<4, "x4", [X4_H], ["tp"]>;
  def X5_W  : RISCVRegWithSubRegs<5, "x5", [X5_H], ["t0"]>;
  def X6_W  : RISCVRegWithSubRegs<6, "x6", [X6_H], ["t1"]>;
  def X7_W  : RISCVRegWithSubRegs<7, "x7", [X7_H], ["t2"]>;
  }
  def X8_W  : RISCVRegWithSubRegs<8, "x8", [X8_H], ["s0", "fp"]>;
  def X9_W  : RISCVRegWithSubRegs<9, "x9", [X9_H], ["s1"]>;
  def X10_W : RISCVRegWithSubRegs<10,"x10", [X10_H], ["a0"]>;
  def X11_W : RISCVRegWithSubRegs<11,"x11", [X11_H], ["a1"]>;
  def X12_W : RISCVRegWithSubRegs<12,"x12", [X12_H], ["a2"]>;
  def X13_W : RISCVRegWithSubRegs<13,"x13", [X13_H], ["a3"]>;
  def X14_W : RISCVRegWithSubRegs<14,"x14", [X14_H], ["a4"]>;
  def X15_W : RISCVRegWithSubRegs<15,"x15", [X15_H], ["a5"]>;
  let CostPerUse = [0, 1] in {
  def X16_W : RISCVRegWithSubRegs<16,"x16", [X16_H], ["a6"]>;
  def X17_W : RISCVRegWithSubRegs<17,"x17", [X17_H], ["a7"]>;
  def X18_W : RISCVRegWithSubRegs<18,"x18", [X18_H], ["s2"]>;
  def X19_W : RISCVRegWithSubRegs<19,"x19", [X19_H], ["s3"]>;
  def X20_W : RISCVRegWithSubRegs<20,"x20", [X20_H], ["s4"]>;
  def X21_W : RISCVRegWithSubRegs<21,"x21", [X21_H], ["s5"]>;
  def X22_W : RISCVRegWithSubRegs<22,"x22", [X22_H], ["s6"]>;
  def X23_W : RISCVRegWithSubRegs<23,"x23", [X23_H], ["s7"]>;
  def X24_W : RISCVRegWithSubRegs<24,"x24", [X24_H], ["s8"]>;
  def X25_W : RISCVRegWithSubRegs<25,"x25", [X25_H], ["s9"]>;
  def X26_W : RISCVRegWithSubRegs<26,"x26", [X26_H], ["s10"]>;
  def X27_W : RISCVRegWithSubRegs<27,"x27", [X27_H], ["s11"]>;
  def X28_W : RISCVRegWithSubRegs<28,"x28", [X28_H], ["t3"]>;
  def X29_W : RISCVRegWithSubRegs<29,"x29", [X29_H], ["t4"]>;
  def X30_W : RISCVRegWithSubRegs<30,"x30", [X30_H], ["t5"]>;
  def X31_W : RISCVRegWithSubRegs<31,"x31", [X31_H], ["t6"]>;
  }
  }

  let SubRegIndices = [sub_32] in {
  let isConstant = true in
  def X0  : RISCVRegWithSubRegs<0, "x0", [X0_W], ["zero"]>, DwarfRegNum<[0]>;
  let CostPerUse = [0, 1] in {
  def X1  : RISCVRegWithSubRegs<1, "x1", [X1_W], ["ra"]>, DwarfRegNum<[1]>;
  def X2  : RISCVRegWithSubRegs<2, "x2", [X2_W], ["sp"]>, DwarfRegNum<[2]>;
  def X3  : RISCVRegWithSubRegs<3, "x3", [X3_W], ["gp"]>, DwarfRegNum<[3]>;
  def X4  : RISCVRegWithSubRegs<4, "x4", [X4_W], ["tp"]>, DwarfRegNum<[4]>;
  def X5  : RISCVRegWithSubRegs<5, "x5", [X5_W], ["t0"]>, DwarfRegNum<[5]>;
  def X6  : RISCVRegWithSubRegs<6, "x6", [X6_W], ["t1"]>, DwarfRegNum<[6]>;
  def X7  : RISCVRegWithSubRegs<7, "x7", [X7_W], ["t2"]>, DwarfRegNum<[7]>;
  }
  def X8  : RISCVRegWithSubRegs<8, "x8", [X8_W], ["s0", "fp"]>, DwarfRegNum<[8]>;
  def X9  : RISCVRegWithSubRegs<9, "x9", [X9_W], ["s1"]>, DwarfRegNum<[9]>;
  def X10 : RISCVRegWithSubRegs<10,"x10", [X10_W], ["a0"]>, DwarfRegNum<[10]>;
  def X11 : RISCVRegWithSubRegs<11,"x11", [X11_W], ["a1"]>, DwarfRegNum<[11]>;
  def X12 : RISCVRegWithSubRegs<12,"x12", [X12_W], ["a2"]>, DwarfRegNum<[12]>;
  def X13 : RISCVRegWithSubRegs<13,"x13", [X13_W], ["a3"]>, DwarfRegNum<[13]>;
  def X14 : RISCVRegWithSubRegs<14,"x14", [X14_W], ["a4"]>, DwarfRegNum<[14]>;
  def X15 : RISCVRegWithSubRegs<15,"x15", [X15_W], ["a5"]>, DwarfRegNum<[15]>;
  let CostPerUse = [0, 1] in {
  def X16 : RISCVRegWithSubRegs<16,"x16", [X16_W], ["a6"]>, DwarfRegNum<[16]>;
  def X17 : RISCVRegWithSubRegs<17,"x17", [X17_W], ["a7"]>, DwarfRegNum<[17]>;
  def X18 : RISCVRegWithSubRegs<18,"x18", [X18_W], ["s2"]>, DwarfRegNum<[18]>;
  def X19 : RISCVRegWithSubRegs<19,"x19", [X19_W], ["s3"]>, DwarfRegNum<[19]>;
  def X20 : RISCVRegWithSubRegs<20,"x20", [X20_W], ["s4"]>, DwarfRegNum<[20]>;
  def X21 : RISCVRegWithSubRegs<21,"x21", [X21_W], ["s5"]>, DwarfRegNum<[21]>;
  def X22 : RISCVRegWithSubRegs<22,"x22", [X22_W], ["s6"]>, DwarfRegNum<[22]>;
  def X23 : RISCVRegWithSubRegs<23,"x23", [X23_W], ["s7"]>, DwarfRegNum<[23]>;
  def X24 : RISCVRegWithSubRegs<24,"x24", [X24_W], ["s8"]>, DwarfRegNum<[24]>;
  def X25 : RISCVRegWithSubRegs<25,"x25", [X25_W], ["s9"]>, DwarfRegNum<[25]>;
  def X26 : RISCVRegWithSubRegs<26,"x26", [X26_W], ["s10"]>, DwarfRegNum<[26]>;
  def X27 : RISCVRegWithSubRegs<27,"x27", [X27_W], ["s11"]>, DwarfRegNum<[27]>;
  def X28 : RISCVRegWithSubRegs<28,"x28", [X28_W], ["t3"]>, DwarfRegNum<[28]>;
  def X29 : RISCVRegWithSubRegs<29,"x29", [X29_W], ["t4"]>, DwarfRegNum<[29]>;
  def X30 : RISCVRegWithSubRegs<30,"x30", [X30_W], ["t5"]>, DwarfRegNum<[30]>;
  def X31 : RISCVRegWithSubRegs<31,"x31", [X31_W], ["t6"]>, DwarfRegNum<[31]>;
  }
  }
}

def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                               [i32,  i64]>;
defvar XLenPairVT = untyped;

// Allow f64 in GPR for ZDINX on RV64.
def XLenFVT : ValueTypeByHwMode<[RV64],
                                [f64]>;
def XLenPairFVT : ValueTypeByHwMode<[RV32],
                                    [f64]>;
def XLenRI : RegInfoByHwMode<
      [RV32,              RV64],
      [RegInfo<32,32,32>, RegInfo<64,64,64>]>;

class RISCVRegisterClass<list<ValueType> regTypes, int align, dag regList>
    : RegisterClass<"RISCV", regTypes, align, regList> {
  bit IsVRegClass = 0;
  int VLMul = 1;
  int NF = 1;

  let Size = !if(IsVRegClass, !mul(VLMul, NF, 64), 0);

  let TSFlags{0} = IsVRegClass;
  let TSFlags{3-1} = !logtwo(VLMul);
  let TSFlags{6-4} = !sub(NF, 1);
}
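
// For illustration: a vector register class with VLMul = 4 and NF = 2 gets
// Size = 4 * 2 * 64 = 512 and TSFlags{6-0} = 0b0010101 (NF - 1 = 1 in bits
// 6-4, log2(VLMul) = 2 in bits 3-1, IsVRegClass = 1 in bit 0).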

class GPRRegisterClass<dag regList>
    : RISCVRegisterClass<[XLenVT, XLenFVT, i32, i16], 32, regList> {
  let RegInfos = XLenRI;
}

// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPR : GPRRegisterClass<(add (sequence "X%u", 10, 17),
                                (sequence "X%u", 5, 7),
                                (sequence "X%u", 28, 31),
                                (sequence "X%u", 8, 9),
                                (sequence "X%u", 18, 27),
                                (sequence "X%u", 0, 4))>;
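// (Each "sequence" expands a template: e.g. (sequence "X%u", 10, 17) is
// X10, X11, ..., X17, so GPR hands out the argument registers a0-a7 first.)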

def GPRX0 : GPRRegisterClass<(add X0)>;
def GPRX1 : GPRRegisterClass<(add X1)>;
def GPRX5 : GPRRegisterClass<(add X5)>;

def GPRNoX0 : GPRRegisterClass<(sub GPR, X0)>;

def GPRNoX0X2 : GPRRegisterClass<(sub GPR, X0, X2)>;

def GPRX7 : GPRRegisterClass<(add X7)>;

// Don't use X1 or X5 for JALR, since using them is a hint to pop the return
// address stack on some microarchitectures. Also remove the reserved registers
// X0, X2, X3, and X4, as doing so reduces the number of register classes that
// get synthesized by tablegen.
def GPRJALR : GPRRegisterClass<(sub GPR, (sequence "X%u", 0, 5))>;

def GPRJALRNonX7 : GPRRegisterClass<(sub GPRJALR, X7)>;

def GPRC : GPRRegisterClass<(add (sequence "X%u", 10, 15),
                                 (sequence "X%u", 8, 9))>;

// For indirect tail calls, we can't use callee-saved registers, as they are
// restored to the saved value before the tail call, which would clobber the
// call address. We shouldn't use x5 since that is a hint to pop the return
// address stack on some microarchitectures.
def GPRTC : GPRRegisterClass<(add (sequence "X%u", 6, 7),
                                  (sequence "X%u", 10, 17),
                                  (sequence "X%u", 28, 31))>;
def GPRTCNonX7 : GPRRegisterClass<(sub GPRTC, X7)>;

def SP : GPRRegisterClass<(add X2)>;

// Saved registers s0 to s7, for the C.MVA01S07 instruction in the Zcmp
// extension.
def SR07 : GPRRegisterClass<(add (sequence "X%u", 8, 9),
                                 (sequence "X%u", 18, 23))>;

def GPRX1X5 : GPRRegisterClass<(add X1, X5)>;

//===----------------------------------------------------------------------===//
// Even-Odd GPR Pairs
//===----------------------------------------------------------------------===//

def XLenPairRI : RegInfoByHwMode<
      [RV32,                RV64],
      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;

// Dummy zero register for use in the register pair containing X0 (as X1 is
// not read or written when the X0 register pair is used).
def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;

// Must add DUMMY_REG_PAIR_WITH_X0 to a separate register class to prevent the
// register's existence from changing codegen (due to the regPressureSetLimit
// for the GPR register class being altered).
def GPRAll : GPRRegisterClass<(add GPR, DUMMY_REG_PAIR_WITH_X0)>;

let RegAltNameIndices = [ABIRegAltName] in {
  def X0_Pair : RISCVRegWithSubRegs<0, X0.AsmName,
                                    [X0, DUMMY_REG_PAIR_WITH_X0],
                                    X0.AltNames> {
    let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
    let CoveredBySubRegs = 1;
  }
  foreach I = 1-15 in {
    defvar Index = !shl(I, 1);
    defvar IndexP1 = !add(Index, 1);
    defvar Reg = !cast<Register>("X"#Index);
    defvar RegP1 = !cast<Register>("X"#IndexP1);
    def "X" # Index #"_X" # IndexP1 : RISCVRegWithSubRegs<Index,
                                                          Reg.AsmName,
                                                          [Reg, RegP1],
                                                          Reg.AltNames> {
      let SubRegIndices = [sub_gpr_even, sub_gpr_odd];
      let CoveredBySubRegs = 1;
    }
  }
}
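
// The loop above defines the even/odd pairs X2_X3, X4_X5, ..., X30_X31: e.g.
// X10_X11 keeps encoding 10 and has X10 as its sub_gpr_even half and X11 as
// its sub_gpr_odd half.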

let RegInfos = XLenPairRI,
    DecoderMethod = "DecodeGPRPairRegisterClass" in {
def GPRPair : RISCVRegisterClass<[XLenPairVT, XLenPairFVT], 64, (add
    X10_X11, X12_X13, X14_X15, X16_X17,
    X6_X7,
    X28_X29, X30_X31,
    X8_X9,
    X18_X19, X20_X21, X22_X23, X24_X25, X26_X27,
    X0_Pair, X2_X3, X4_X5
)>;

def GPRPairNoX0 : RISCVRegisterClass<[XLenPairVT, XLenPairFVT], 64, (sub GPRPair, X0_Pair)>;
} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"

let RegInfos = XLenPairRI in
def GPRPairC : RISCVRegisterClass<[XLenPairVT, XLenPairFVT], 64, (add
  X10_X11, X12_X13, X14_X15, X8_X9
)>;

//===----------------------------------------------------------------------===//
// Floating Point registers
//===----------------------------------------------------------------------===//

let RegAltNameIndices = [ABIRegAltName] in {
  def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
  def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
  def F2_H  : RISCVReg16<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
  def F3_H  : RISCVReg16<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
  def F4_H  : RISCVReg16<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
  def F5_H  : RISCVReg16<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
  def F6_H  : RISCVReg16<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
  def F7_H  : RISCVReg16<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
  def F8_H  : RISCVReg16<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
  def F9_H  : RISCVReg16<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
  def F10_H : RISCVReg16<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
  def F11_H : RISCVReg16<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
  def F12_H : RISCVReg16<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
  def F13_H : RISCVReg16<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
  def F14_H : RISCVReg16<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
  def F15_H : RISCVReg16<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
  def F16_H : RISCVReg16<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
  def F17_H : RISCVReg16<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
  def F18_H : RISCVReg16<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
  def F19_H : RISCVReg16<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
  def F20_H : RISCVReg16<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
  def F21_H : RISCVReg16<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
  def F22_H : RISCVReg16<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
  def F23_H : RISCVReg16<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
  def F24_H : RISCVReg16<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
  def F25_H : RISCVReg16<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
  def F26_H : RISCVReg16<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
  def F27_H : RISCVReg16<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
  def F28_H : RISCVReg16<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
  def F29_H : RISCVReg16<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
  def F30_H : RISCVReg16<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
  def F31_H : RISCVReg16<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;

  foreach Index = 0-31 in {
    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
      DwarfRegAlias<!cast<Register>("F"#Index#"_H")>;
  }

  foreach Index = 0-31 in {
    def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
      DwarfRegAlias<!cast<Register>("F"#Index#"_H")>;
  }
}

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
// We start by allocating argument registers in reverse order since they are
// compressible.
def FPR16 : RISCVRegisterClass<[f16, bf16], 16, (add
    (sequence "F%u_H", 15, 10), // fa5-fa0
    (sequence "F%u_H", 0, 7),   // ft0-ft7
    (sequence "F%u_H", 16, 17), // fa6-fa7
    (sequence "F%u_H", 28, 31), // ft8-ft11
    (sequence "F%u_H", 8, 9),   // fs0-fs1
    (sequence "F%u_H", 18, 27)  // fs2-fs11
)>;

def FPR16C : RISCVRegisterClass<[f16, bf16], 16, (add
    (sequence "F%u_H", 15, 10),
    (sequence "F%u_H", 8, 9)
)>;

def FPR32 : RISCVRegisterClass<[f32], 32, (add
    (sequence "F%u_F", 15, 10),
    (sequence "F%u_F", 0, 7),
    (sequence "F%u_F", 16, 17),
    (sequence "F%u_F", 28, 31),
    (sequence "F%u_F", 8, 9),
    (sequence "F%u_F", 18, 27)
)>;

def FPR32C : RISCVRegisterClass<[f32], 32, (add
  (sequence "F%u_F", 15, 10),
  (sequence "F%u_F", 8, 9)
)>;

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR64 : RISCVRegisterClass<[f64], 64, (add
    (sequence "F%u_D", 15, 10),
    (sequence "F%u_D", 0, 7),
    (sequence "F%u_D", 16, 17),
    (sequence "F%u_D", 28, 31),
    (sequence "F%u_D", 8, 9),
    (sequence "F%u_D", 18, 27)
)>;

def FPR64C : RISCVRegisterClass<[f64], 64, (add
  (sequence "F%u_D", 15, 10),
  (sequence "F%u_D", 8, 9)
)>;

//===----------------------------------------------------------------------===//
// GPR Classes for "H/F/D in X"
//===----------------------------------------------------------------------===//

// 16-bit GPR sub-register class used by Zhinx instructions.
def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
                                                (sequence "X%u_H", 5, 7),
                                                (sequence "X%u_H", 28, 31),
                                                (sequence "X%u_H", 8, 9),
                                                (sequence "X%u_H", 18, 27),
                                                (sequence "X%u_H", 0, 4))>;
def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
                                                 (sequence "X%u_H", 8, 9))>;
def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;

def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
                                                (sequence "X%u_W", 5, 7),
                                                (sequence "X%u_W", 28, 31),
                                                (sequence "X%u_W", 8, 9),
                                                (sequence "X%u_W", 18, 27),
                                                (sequence "X%u_W", 0, 4))>;
def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
                                                 (sequence "X%u_W", 8, 9))>;
def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;

//===----------------------------------------------------------------------===//
// Vector type mapping to LLVM types.
//===----------------------------------------------------------------------===//

// The V vector extension requires that VLEN >= 128 and <= 65536.
// Additionally, the only supported ELEN values are 32 and 64,
// thus `vscale` can be defined as VLEN/64,
// allowing the same types with either ELEN value.
//
//         MF8    MF4     MF2     M1      M2      M4       M8
// i64*    N/A    N/A     N/A     nxv1i64 nxv2i64 nxv4i64  nxv8i64
// i32     N/A    N/A     nxv1i32 nxv2i32 nxv4i32 nxv8i32  nxv16i32
// i16     N/A    nxv1i16 nxv2i16 nxv4i16 nxv8i16 nxv16i16 nxv32i16
// i8      nxv1i8 nxv2i8  nxv4i8  nxv8i8  nxv16i8 nxv32i8  nxv64i8
// double* N/A    N/A     N/A     nxv1f64 nxv2f64 nxv4f64  nxv8f64
// float   N/A    N/A     nxv1f32 nxv2f32 nxv4f32 nxv8f32  nxv16f32
// half    N/A    nxv1f16 nxv2f16 nxv4f16 nxv8f16 nxv16f16 nxv32f16
// * ELEN=64
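//
// For example (illustrative arithmetic): with VLEN = 128, vscale = 128/64 = 2,
// so nxv2i32 holds 2 x vscale = 4 i32 elements, exactly one 128-bit M1
// register.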

defvar vint8mf8_t = nxv1i8;
defvar vint8mf4_t = nxv2i8;
defvar vint8mf2_t = nxv4i8;
defvar vint8m1_t = nxv8i8;
defvar vint8m2_t = nxv16i8;
defvar vint8m4_t = nxv32i8;
defvar vint8m8_t = nxv64i8;

defvar vint16mf4_t = nxv1i16;
defvar vint16mf2_t = nxv2i16;
defvar vint16m1_t  = nxv4i16;
defvar vint16m2_t  = nxv8i16;
defvar vint16m4_t  = nxv16i16;
defvar vint16m8_t  = nxv32i16;

defvar vint32mf2_t = nxv1i32;
defvar vint32m1_t  = nxv2i32;
defvar vint32m2_t  = nxv4i32;
defvar vint32m4_t  = nxv8i32;
defvar vint32m8_t  = nxv16i32;

defvar vint64m1_t = nxv1i64;
defvar vint64m2_t = nxv2i64;
defvar vint64m4_t = nxv4i64;
defvar vint64m8_t = nxv8i64;

defvar vfloat16mf4_t = nxv1f16;
defvar vfloat16mf2_t = nxv2f16;
defvar vfloat16m1_t  = nxv4f16;
defvar vfloat16m2_t  = nxv8f16;
defvar vfloat16m4_t  = nxv16f16;
defvar vfloat16m8_t  = nxv32f16;

defvar vbfloat16mf4_t = nxv1bf16;
defvar vbfloat16mf2_t = nxv2bf16;
defvar vbfloat16m1_t  = nxv4bf16;
defvar vbfloat16m2_t  = nxv8bf16;
defvar vbfloat16m4_t  = nxv16bf16;
defvar vbfloat16m8_t  = nxv32bf16;

defvar vfloat32mf2_t = nxv1f32;
defvar vfloat32m1_t  = nxv2f32;
defvar vfloat32m2_t  = nxv4f32;
defvar vfloat32m4_t  = nxv8f32;
defvar vfloat32m8_t  = nxv16f32;

defvar vfloat64m1_t = nxv1f64;
defvar vfloat64m2_t = nxv2f64;
defvar vfloat64m4_t = nxv4f64;
defvar vfloat64m8_t = nxv8f64;

defvar vbool1_t  = nxv64i1;
defvar vbool2_t  = nxv32i1;
defvar vbool4_t  = nxv16i1;
defvar vbool8_t  = nxv8i1;
defvar vbool16_t = nxv4i1;
defvar vbool32_t = nxv2i1;
defvar vbool64_t = nxv1i1;

// There is no need to define register classes for fractional LMUL.
defvar LMULList = [1, 2, 4, 8];

//===----------------------------------------------------------------------===//
// Utility classes for segment load/store.
//===----------------------------------------------------------------------===//
// The set of legal NF for LMUL = lmul.
// LMUL <= 1, NF = 2, 3, 4, 5, 6, 7, 8
// LMUL == 2, NF = 2, 3, 4
// LMUL == 4, NF = 2
// LMUL == 8, no legal NF
class NFList<int lmul> {
  list<int> L = !cond(!eq(lmul, 8): [],
                      !eq(lmul, 4): [2],
                      !eq(lmul, 2): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}
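
// e.g. NFList<2>.L = [2, 3, 4] and NFList<8>.L = [] (no segment tuple fits at
// LMUL = 8, since NF x LMUL may not exceed 8 registers).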

// Generate the list of sub-register indexes sub_vrm<lmul>_0 ..
// sub_vrm<lmul>_<nf-1> used by an NF-field tuple at LMUL = lmul.
class SubRegSet<int nf, int lmul> {
  list<SubRegIndex> L = !foldl([]<SubRegIndex>,
                               !range(0, 8),
                               AccList, i,
                               !listconcat(AccList,
                                 !if(!lt(i, nf),
                                   [!cast<SubRegIndex>("sub_vrm" # lmul # "_" # i)],
                                   [])));
}
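
// e.g. SubRegSet<2, 4>.L = [sub_vrm4_0, sub_vrm4_1] and
// SubRegSet<3, 1>.L = [sub_vrm1_0, sub_vrm1_1, sub_vrm1_2].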

// Collect the valid indexes into 'R' for the given NF and LMUL values, starting
// from TUPLE_INDEX. When NF = 2, the valid TUPLE_INDEX values are 0 and 1.
// For example, when LMUL = 4, the potential valid indexes are
// [8, 12, 16, 20, 24, 28, 4]. However, not all of these indexes are valid under
// NF = 2. For example, 28 is not valid under LMUL = 4, NF = 2 and TUPLE_INDEX = 0.
// The filter is
//   (tuple_index + i) x lmul <= (tuple_index x lmul) + 32 - (nf x lmul)
//
// Using TUPLE_INDEX = 0, LMUL = 4 and NF = 2 as an example,
//   i x 4 <= 24
// The class will return [8, 12, 16, 20, 24, 4].
// Using TUPLE_INDEX = 1, LMUL = 4 and NF = 2 as an example,
//   (1 + i) x 4 <= 28
// The class will return [12, 16, 20, 24, 28, 8].
//
class IndexSet<int tuple_index, int nf, int lmul, bit isV0 = false> {
  list<int> R =
    !foldl([]<int>,
              !if(isV0, [0],
                !cond(
                  !eq(lmul, 1): !listconcat(!range(8, 32), !range(1, 8)),
                  !eq(lmul, 2): !listconcat(!range(4, 16), !range(1, 4)),
                  !eq(lmul, 4): !listconcat(!range(2, 8), !range(1, 2)))),
              L, i,
              !listconcat(L,
                          !if(!le(!mul(!add(i, tuple_index), lmul),
                                  !sub(!add(32, !mul(tuple_index, lmul)), !mul(nf, lmul))),
                              [!mul(!add(i, tuple_index), lmul)], [])));
}
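
// When isV0 is set, the candidate list collapses to [0]; e.g.
// IndexSet<0, 2, 4, true>.R = [0], i.e. the only tuple is the one starting at
// the V0 register group.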

// This class returns a list of vector register collections.
// For example, for NF = 2 and LMUL = 4,
// L would be:
//   ([V8M4, V12M4, V16M4, V20M4, V24M4, V4M4],
//    [V12M4, V16M4, V20M4, V24M4, V28M4, V8M4])
// Names holds the starting register of each register list, in this example:
//   ["v8", "v12", "v16", "v20", "v24", "v4"]
class VRegList<list<dag> LIn, int start, int nf, int lmul, bit isV0> {
  list<dag> L =
    !if(!ge(start, nf),
        LIn,
        !listconcat(
          [!dag(add,
                !foreach(i, IndexSet<start, nf, lmul, isV0>.R,
                  !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
                                                  !eq(lmul, 4): "M4",
                                                  true: ""))),
                !listsplat("",
                  !size(IndexSet<start, nf, lmul, isV0>.R)))],
          VRegList<LIn, !add(start, 1), nf, lmul, isV0>.L));
  list<string> Names =
    !if(!ge(start, nf), [],
        !foreach(i, IndexSet<start, nf, lmul, isV0>.R, "v" # i));
}

//===----------------------------------------------------------------------===//
// Vector registers
//===----------------------------------------------------------------------===//

foreach Index = !range(0, 32, 1) in {
  def V#Index : RISCVReg<Index, "v"#Index>, DwarfRegNum<[!add(Index, 96)]>;
}

foreach Index = !range(0, 32, 2) in {
  def V#Index#M2 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index),
                      !cast<Register>("V"#!add(Index, 1))]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm1_0, sub_vrm1_1];
  }
}

foreach Index = !range(0, 32, 4) in {
  def V#Index#M4 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index#"M2"),
                      !cast<Register>("V"#!add(Index, 2)#"M2")]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm2_0, sub_vrm2_1];
  }
}

foreach Index = !range(0, 32, 8) in {
  def V#Index#M8 : RISCVRegWithSubRegs<Index, "v"#Index,
                     [!cast<Register>("V"#Index#"M4"),
                      !cast<Register>("V"#!add(Index, 4)#"M4")]>,
                   DwarfRegAlias<!cast<Register>("V"#Index)> {
    let SubRegIndices = [sub_vrm4_0, sub_vrm4_1];
  }
}
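
// e.g. V8M4 has sub-registers V8M2 and V10M2, which in turn cover the single
// registers V8-V11; all of them alias DWARF number 96 + 8 = 104 via
// DwarfRegAlias.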

def VTYPE  : RISCVReg<0, "vtype">;
def VL     : RISCVReg<0, "vl">;
def VXSAT  : RISCVReg<0, "vxsat">;
def VXRM   : RISCVReg<0, "vxrm">;
let isConstant = true in
def VLENB  : RISCVReg<0, "vlenb">,
             DwarfRegNum<[!add(4096, SysRegVLENB.Encoding)]>;

def VCSR : RISCVRegisterClass<[XLenVT], 32,
                              (add VTYPE, VL, VLENB)> {
  let RegInfos = XLenRI;
  let isAllocatable = 0;
}

foreach m = [1, 2, 4] in {
  foreach n = NFList<m>.L in {
    defvar RegListWOV0 = VRegList<[], 0, n, m, false>;
    defvar RegListWV0 = VRegList<[], 0, n, m, true>;
    def "VN" # n # "M" # m # "NoV0": RegisterTuples<
                                       SubRegSet<n, m>.L,
                                       RegListWOV0.L,
                                       RegListWOV0.Names>;
    def "VN" # n # "M" # m # "V0" : RegisterTuples<
                                       SubRegSet<n, m>.L,
                                       RegListWV0.L,
                                       RegListWV0.Names>;
  }
}
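
// For instance (derived from the classes above): VN2M4NoV0 contains the M4
// pair tuples v8=(V8M4,V12M4), v12, v16, v20, v24 and v4=(V4M4,V8M4), while
// VN2M4V0 contains only v0=(V0M4,V4M4).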

class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
    : RISCVRegisterClass<regTypes,
                         64, // The maximum supported ELEN is 64.
                         regList> {
  let IsVRegClass = 1;
  let VLMul = Vlmul;
}

defvar VMaskVTs = [vbool1_t, vbool2_t, vbool4_t, vbool8_t, vbool16_t,
                   vbool32_t, vbool64_t];

defvar VM1VTs = [vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
                 vbfloat16m1_t, vfloat16m1_t, vfloat32m1_t,
                 vfloat64m1_t, vint8mf2_t, vint8mf4_t, vint8mf8_t,
                 vint16mf2_t, vint16mf4_t, vint32mf2_t,
                 vfloat16mf4_t, vfloat16mf2_t, vbfloat16mf4_t,
                 vbfloat16mf2_t, vfloat32mf2_t];

defvar VM2VTs = [vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                 vfloat16m2_t, vbfloat16m2_t,
                 vfloat32m2_t, vfloat64m2_t];

defvar VM4VTs = [vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                 vfloat16m4_t, vbfloat16m4_t,
                 vfloat32m4_t, vfloat64m4_t];

defvar VM8VTs = [vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                 vfloat16m8_t, vbfloat16m8_t,
                 vfloat32m8_t, vfloat64m8_t];

// We reverse the order of the last 8 registers so that we don't needlessly
// prevent allocation of higher-LMUL register groups while still putting v0
// last in the allocation order.

def VR : VReg<!listconcat(VM1VTs, VMaskVTs),
              (add (sequence "V%u", 8, 31),
                   (sequence "V%u", 7, 0)), 1>;

def VRNoV0 : VReg<!listconcat(VM1VTs, VMaskVTs), (sub VR, V0), 1>;

def VRM2 : VReg<VM2VTs, (add (sequence "V%uM2", 8, 31, 2),
                             (sequence "V%uM2", 6, 0, 2)), 2>;

def VRM2NoV0 : VReg<VM2VTs, (sub VRM2, V0M2), 2>;

def VRM4 : VReg<VM4VTs, (add V8M4, V12M4, V16M4, V20M4,
                             V24M4, V28M4, V4M4, V0M4), 4>;

def VRM4NoV0 : VReg<VM4VTs, (sub VRM4, V0M4), 4>;

def VRM8 : VReg<VM8VTs, (add V8M8, V16M8, V24M8, V0M8), 8>;

def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;

def VMV0 : VReg<VMaskVTs, (add V0), 1>;

// This register class is added so that inline assembly can use vector mask
// types.
def VM : VReg<VMaskVTs, (add VR), 1>;

defvar VTupM1N2VTs = [riscv_nxv8i8x2, riscv_nxv4i8x2, riscv_nxv2i8x2, riscv_nxv1i8x2];
defvar VTupM1N3VTs = [riscv_nxv8i8x3, riscv_nxv4i8x3, riscv_nxv2i8x3, riscv_nxv1i8x3];
defvar VTupM1N4VTs = [riscv_nxv8i8x4, riscv_nxv4i8x4, riscv_nxv2i8x4, riscv_nxv1i8x4];
defvar VTupM1N5VTs = [riscv_nxv8i8x5, riscv_nxv4i8x5, riscv_nxv2i8x5, riscv_nxv1i8x5];
defvar VTupM1N6VTs = [riscv_nxv8i8x6, riscv_nxv4i8x6, riscv_nxv2i8x6, riscv_nxv1i8x6];
defvar VTupM1N7VTs = [riscv_nxv8i8x7, riscv_nxv4i8x7, riscv_nxv2i8x7, riscv_nxv1i8x7];
defvar VTupM1N8VTs = [riscv_nxv8i8x8, riscv_nxv4i8x8, riscv_nxv2i8x8, riscv_nxv1i8x8];
defvar VTupM2N2VTs = [riscv_nxv16i8x2];
defvar VTupM2N3VTs = [riscv_nxv16i8x3];
defvar VTupM2N4VTs = [riscv_nxv16i8x4];
defvar VTupM4N2VTs = [riscv_nxv32i8x2];
class VTupRegList<int LMUL, int NF> {
  list<ValueType> L = !cond(!and(!eq(LMUL, 1), !eq(NF, 2)): VTupM1N2VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 3)): VTupM1N3VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 4)): VTupM1N4VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 5)): VTupM1N5VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 6)): VTupM1N6VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 7)): VTupM1N7VTs,
                            !and(!eq(LMUL, 1), !eq(NF, 8)): VTupM1N8VTs,
                            !and(!eq(LMUL, 2), !eq(NF, 2)): VTupM2N2VTs,
                            !and(!eq(LMUL, 2), !eq(NF, 3)): VTupM2N3VTs,
                            !and(!eq(LMUL, 2), !eq(NF, 4)): VTupM2N4VTs,
                            !and(!eq(LMUL, 4), !eq(NF, 2)): VTupM4N2VTs);
}

foreach m = LMULList in {
  foreach nf = NFList<m>.L in {
    let NF = nf in {
      def "VRN" # nf # "M" # m # "NoV0"
        : VReg<VTupRegList<m, nf>.L,
               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0")),
               m>;
      def "VRN" # nf # "M" # m
        : VReg<VTupRegList<m, nf>.L,
               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0"),
                    !cast<RegisterTuples>("VN" # nf # "M" # m # "V0")),
               m>;
    }
  }
}
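
// e.g. for m = 1 and nf = 2 this defines VRN2M1NoV0 and VRN2M1 (the latter
// additionally contains the tuple starting at V0); both have NF = 2.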

//===----------------------------------------------------------------------===//
// Special registers
//===----------------------------------------------------------------------===//

def FFLAGS : RISCVReg<0, "fflags">;
def FRM    : RISCVReg<0, "frm">;

// Shadow Stack register
def SSP    : RISCVReg<0, "ssp">;

// Dummy SiFive VCIX state register
def SF_VCIX_STATE : RISCVReg<0, "sf.vcix_state">;
def : RISCVRegisterClass<[XLenVT], 32, (add SF_VCIX_STATE)> {
  let RegInfos = XLenRI;
  let isAllocatable = 0;
}