; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load-store.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s

; A naturally-aligned <2 x i8> is 16 bits, so both targets use scalar lh/sh.
define void @v2i8(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lh a0, 0(a0)
; CHECK-NEXT:    sh a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %p
  store <2 x i8> %v, ptr %q
  ret void
}

; A <2 x i16> is 32 bits, so both targets use scalar lw/sw.
define void @v2i16(ptr %p, ptr %q) {
; CHECK-LABEL: v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lw a0, 0(a0)
; CHECK-NEXT:    sw a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <2 x i16>, ptr %p
  store <2 x i16> %v, ptr %q
  ret void
}

; A <2 x i32> is 64 bits: RV64 copies it through a scalar ld/sd, while RV32
; (XLEN=32) falls back to vector vle32/vse32 with LMUL=1/2.
define void @v2i32(ptr %p, ptr %q) {
; RV32-LABEL: v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vse32.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    ret
  %v = load <2 x i32>, ptr %p
  store <2 x i32> %v, ptr %q
  ret void
}

; A <2 x i64> (128 bits) exceeds XLEN on both targets: vector vle64/vse64, LMUL=1.
define void @v2i64(ptr %p, ptr %q) {
; CHECK-LABEL: v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <2 x i64>, ptr %p
  store <2 x i64> %v, ptr %q
  ret void
}

; A <2 x half> is 32 bits and is copied through scalar GPR lw/sw on both targets.
define void @v2f16(ptr %p, ptr %q) {
; CHECK-LABEL: v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lw a0, 0(a0)
; CHECK-NEXT:    sw a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %p
  store <2 x half> %v, ptr %q
  ret void
}

; A <2 x float> is 64 bits: scalar ld/sd on RV64, vector vle32/vse32 on RV32.
define void @v2f32(ptr %p, ptr %q) {
; RV32-LABEL: v2f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vle32.v v8, (a0)
; RV32-NEXT:    vse32.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: v2f32:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    ret
  %v = load <2 x float>, ptr %p
  store <2 x float> %v, ptr %q
  ret void
}

; A <2 x double> (128 bits) uses vector vle64/vse64 with LMUL=1 on both targets.
define void @v2f64(ptr %p, ptr %q) {
; CHECK-LABEL: v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <2 x double>, ptr %p
  store <2 x double> %v, ptr %q
  ret void
}

; A <4 x i8> is 32 bits, so both targets use scalar lw/sw.
define void @v4i8(ptr %p, ptr %q) {
; CHECK-LABEL: v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lw a0, 0(a0)
; CHECK-NEXT:    sw a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %p
  store <4 x i8> %v, ptr %q
  ret void
}

; A <4 x i16> is 64 bits: scalar ld/sd on RV64, vector vle16/vse16 on RV32.
define void @v4i16(ptr %p, ptr %q) {
; RV32-LABEL: v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    vse16.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    ret
  %v = load <4 x i16>, ptr %p
  store <4 x i16> %v, ptr %q
  ret void
}

; A <4 x i32> (128 bits) uses vector vle32/vse32 with LMUL=1 on both targets.
define void @v4i32(ptr %p, ptr %q) {
; CHECK-LABEL: v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <4 x i32>, ptr %p
  store <4 x i32> %v, ptr %q
  ret void
}

; A <4 x i64> (256 bits) uses vector vle64/vse64 with LMUL=2 on both targets.
define void @v4i64(ptr %p, ptr %q) {
; CHECK-LABEL: v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <4 x i64>, ptr %p
  store <4 x i64> %v, ptr %q
  ret void
}

; A <4 x half> is 64 bits: scalar ld/sd on RV64, vector vle16/vse16 on RV32.
define void @v4f16(ptr %p, ptr %q) {
; RV32-LABEL: v4f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NEXT:    vle16.v v8, (a0)
; RV32-NEXT:    vse16.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: v4f16:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    ret
  %v = load <4 x half>, ptr %p
  store <4 x half> %v, ptr %q
  ret void
}

; A <4 x float> (128 bits) uses vector vle32/vse32 with LMUL=1 on both targets.
define void @v4f32(ptr %p, ptr %q) {
; CHECK-LABEL: v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %p
  store <4 x float> %v, ptr %q
  ret void
}

; A <4 x double> (256 bits) uses vector vle64/vse64 with LMUL=2 on both targets.
define void @v4f64(ptr %p, ptr %q) {
; CHECK-LABEL: v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %p
  store <4 x double> %v, ptr %q
  ret void
}

; A <8 x i8> is 64 bits: scalar ld/sd on RV64, vector vle8/vse8 on RV32.
define void @v8i8(ptr %p, ptr %q) {
; RV32-LABEL: v8i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vle8.v v8, (a0)
; RV32-NEXT:    vse8.v v8, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: v8i8:
; RV64:       # %bb.0:
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    ret
  %v = load <8 x i8>, ptr %p
  store <8 x i8> %v, ptr %q
  ret void
}

; A <8 x i16> (128 bits) uses vector vle16/vse16 with LMUL=1 on both targets.
define void @v8i16(ptr %p, ptr %q) {
; CHECK-LABEL: v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <8 x i16>, ptr %p
  store <8 x i16> %v, ptr %q
  ret void
}

; A <8 x i32> (256 bits) uses vector vle32/vse32 with LMUL=2 on both targets.
define void @v8i32(ptr %p, ptr %q) {
; CHECK-LABEL: v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %p
  store <8 x i32> %v, ptr %q
  ret void
}

; A <8 x i64> (512 bits) uses vector vle64/vse64 with LMUL=4 on both targets.
define void @v8i64(ptr %p, ptr %q) {
; CHECK-LABEL: v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <8 x i64>, ptr %p
  store <8 x i64> %v, ptr %q
  ret void
}

; With only 1-byte alignment the 16-bit scalar lh cannot be used (compare
; @v2i8_align2), so an element-wise vector vle8/vse8 is emitted instead.
define void @v2i8_align1(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_align1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %p, align 1
  store <2 x i8> %v, ptr %q
  ret void
}

; With 2-byte alignment the <2 x i8> access is scalarized to lh/sh.
define void @v2i8_align2(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_align2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lh a0, 0(a0)
; CHECK-NEXT:    sh a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %p, align 2
  store <2 x i8> %v, ptr %q
  ret void
}

; Over-alignment (4 bytes) still produces the same scalar lh/sh as align 2.
define void @v2i8_align4(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_align4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lh a0, 0(a0)
; CHECK-NEXT:    sh a0, 0(a1)
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %p, align 4
  store <2 x i8> %v, ptr %q
  ret void
}

; A volatile load is not converted to the scalar lh form used by @v2i8;
; the checks show it keeps the element-wise vector vle8/vse8 sequence.
define void @v2i8_volatile_load(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_volatile_load:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load volatile <2 x i8>, ptr %p
  store <2 x i8> %v, ptr %q
  ret void
}

; A volatile store likewise blocks the scalar lh/sh form of @v2i8;
; the checks show the vector vle8/vse8 sequence is kept.
define void @v2i8_volatile_store(ptr %p, ptr %q) {
; CHECK-LABEL: v2i8_volatile_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %p
  store volatile <2 x i8> %v, ptr %q
  ret void
}

; A <8 x bfloat> (128 bits, +zvfbfmin) uses vector vle16/vse16 with LMUL=1,
; matching the <8 x i16> case.
define void @v8bf16(ptr %p, ptr %q) {
; CHECK-LABEL: v8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    ret
  %v = load <8 x bfloat>, ptr %p
  store <8 x bfloat> %v, ptr %q
  ret void
}
