; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

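; Check that trivially rematerializable vector instructions are re-emitted
; under register pressure rather than spilled. Each test materializes a
; value, stores it, performs four volatile LMUL=8 loads (enough live values
; to occupy all four m8 register groups v0/v8/v16/v24), and then stores the
; materialized value again.
;
; vid.v with no passthru is rematerializable: the final store of %vid is fed
; by a second vid.v instead of a reload, and no stack slot is allocated.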
define void @vid(ptr %p) {
; CHECK-LABEL: vid:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> poison, i64 -1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}

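; With a tied passthru operand (vl=1, tu policy) vid.v is not rematerialized:
; the result is kept live in v8 and one of the loaded values is spilled
; instead (note the vlenb-sized stack adjustment and the folded
; spill/reload).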
define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
; CHECK-LABEL: vid_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, tu, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %vid = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(<vscale x 8 x i64> %v, i64 1)
  store volatile <vscale x 8 x i64> %vid, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vid, ptr %p
  ret void
}

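; vmv.v.i with an immediate operand is rematerializable: the final store
; re-emits vmv.v.i v8, 1 instead of spilling.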
define void @vmv.v.i(ptr %p) {
; CHECK-LABEL: vmv.v.i:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vmv.v.i v8, 1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %vmv.v.i = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 1, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p
  ret void
}

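; The scalar source in a1 is dead after the first vmv.v.x, so rematerializing
; it at the final store would require extending a1's live range (hence the
; function name); the vector result is spilled and reloaded instead.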
define void @vmv.v.x_needs_extended(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.v.x_needs_extended:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
  ret void
}

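; The trailing "store volatile i64 %x" keeps the scalar in a1 live to the end
; of the function, so vmv.v.x can be rematerialized without extending any
; live range: the output re-emits vmv.v.x v8, a1 and needs no stack slot.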
define void @vmv.v.x_live(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.v.x_live:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    sd a1, 0(a0)
; CHECK-NEXT:    ret
  %vmv.v.x = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.v.x, ptr %p
  store volatile i64 %x, ptr %p
  ret void
}

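; Same pattern for the FP splat: fa0 is kept live by the trailing scalar
; store, so vfmv.v.f is rematerialized rather than spilled.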
define void @vfmv.v.f(ptr %p, double %x) {
; CHECK-LABEL: vfmv.v.f:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    fsd fa0, 0(a0)
; CHECK-NEXT:    ret
  %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p

  %a = load volatile <vscale x 8 x double>, ptr %p
  %b = load volatile <vscale x 8 x double>, ptr %p
  %c = load volatile <vscale x 8 x double>, ptr %p
  %d = load volatile <vscale x 8 x double>, ptr %p
  store volatile <vscale x 8 x double> %d, ptr %p
  store volatile <vscale x 8 x double> %c, ptr %p
  store volatile <vscale x 8 x double> %b, ptr %p
  store volatile <vscale x 8 x double> %a, ptr %p

  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
  store volatile double %x, ptr %p
  ret void
}

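; vmv.s.x only writes element 0, but the result is still stored as a full
; LMUL=8 value; with a1 kept live by the trailing scalar store, the final
; store re-emits vmv.s.x instead of spilling.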
define void @vmv.s.x(ptr %p, i64 %x) {
; CHECK-LABEL: vmv.s.x:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vmv.s.x v8, a1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    sd a1, 0(a0)
; CHECK-NEXT:    ret
  %vmv.s.x = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> poison, i64 %x, i64 -1)
  store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p

  %a = load volatile <vscale x 8 x i64>, ptr %p
  %b = load volatile <vscale x 8 x i64>, ptr %p
  %c = load volatile <vscale x 8 x i64>, ptr %p
  %d = load volatile <vscale x 8 x i64>, ptr %p
  store volatile <vscale x 8 x i64> %d, ptr %p
  store volatile <vscale x 8 x i64> %c, ptr %p
  store volatile <vscale x 8 x i64> %b, ptr %p
  store volatile <vscale x 8 x i64> %a, ptr %p

  store volatile <vscale x 8 x i64> %vmv.s.x, ptr %p
  store volatile i64 %x, ptr %p
  ret void
}

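; Likewise for vfmv.s.f: fa0 stays live thanks to the trailing scalar store,
; and the final store re-emits vfmv.s.f instead of reloading from a spill.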
define void @vfmv.s.f(ptr %p, double %x) {
; CHECK-LABEL: vfmv.s.f:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vl8re64.v v0, (a0)
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    vs8r.v v0, (a0)
; CHECK-NEXT:    vs8r.v v24, (a0)
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    fsd fa0, 0(a0)
; CHECK-NEXT:    ret
  %vfmv.s.f = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
  store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p

  %a = load volatile <vscale x 8 x double>, ptr %p
  %b = load volatile <vscale x 8 x double>, ptr %p
  %c = load volatile <vscale x 8 x double>, ptr %p
  %d = load volatile <vscale x 8 x double>, ptr %p
  store volatile <vscale x 8 x double> %d, ptr %p
  store volatile <vscale x 8 x double> %c, ptr %p
  store volatile <vscale x 8 x double> %b, ptr %p
  store volatile <vscale x 8 x double> %a, ptr %p

  store volatile <vscale x 8 x double> %vfmv.s.f, ptr %p
  store volatile double %x, ptr %p
  ret void
}