; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s

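; Basic load/add/store of scalable vectors at each LMUL. For LMUL >= 1 the
; loads and stores should be selected as whole-register vl<N>r.v/vs<N>r.v
; instructions, leaving the vsetvli to the add alone; fractional LMULs have
; no whole-register form and fall back to vsetvli with unit-stride vle/vse.
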
define void @vadd_vint8m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v8, (a1)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i8>, ptr %pa
  %vb = load <vscale x 8 x i8>, ptr %pb
  %vc = add <vscale x 8 x i8> %va, %vb
  store <vscale x 8 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v8, (a1)
; CHECK-NEXT:    vl2r.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i8>, ptr %pa
  %vb = load <vscale x 16 x i8>, ptr %pb
  %vc = add <vscale x 16 x i8> %va, %vb
  store <vscale x 16 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v8, (a1)
; CHECK-NEXT:    vl4r.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i8>, ptr %pa
  %vb = load <vscale x 32 x i8>, ptr %pb
  %vc = add <vscale x 32 x i8> %va, %vb
  store <vscale x 32 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v8, (a1)
; CHECK-NEXT:    vl8r.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 64 x i8>, ptr %pa
  %vb = load <vscale x 64 x i8>, ptr %pb
  %vc = add <vscale x 64 x i8> %va, %vb
  store <vscale x 64 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i8>, ptr %pa
  %vb = load <vscale x 4 x i8>, ptr %pb
  %vc = add <vscale x 4 x i8> %va, %vb
  store <vscale x 4 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i8>, ptr %pa
  %vb = load <vscale x 2 x i8>, ptr %pb
  %vc = add <vscale x 2 x i8> %va, %vb
  store <vscale x 2 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8mf8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i8>, ptr %pa
  %vb = load <vscale x 1 x i8>, ptr %pb
  %vc = add <vscale x 1 x i8> %va, %vb
  store <vscale x 1 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v8, (a1)
; CHECK-NEXT:    vl1re16.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i16>, ptr %pa
  %vb = load <vscale x 4 x i16>, ptr %pb
  %vc = add <vscale x 4 x i16> %va, %vb
  store <vscale x 4 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v8, (a1)
; CHECK-NEXT:    vl2re16.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i16>, ptr %pa
  %vb = load <vscale x 8 x i16>, ptr %pb
  %vc = add <vscale x 8 x i16> %va, %vb
  store <vscale x 8 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v8, (a1)
; CHECK-NEXT:    vl4re16.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i16>, ptr %pa
  %vb = load <vscale x 16 x i16>, ptr %pb
  %vc = add <vscale x 16 x i16> %va, %vb
  store <vscale x 16 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re16.v v8, (a1)
; CHECK-NEXT:    vl8re16.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i16>, ptr %pa
  %vb = load <vscale x 32 x i16>, ptr %pb
  %vc = add <vscale x 32 x i16> %va, %vb
  store <vscale x 32 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i16>, ptr %pa
  %vb = load <vscale x 2 x i16>, ptr %pb
  %vc = add <vscale x 2 x i16> %va, %vb
  store <vscale x 2 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i16>, ptr %pa
  %vb = load <vscale x 1 x i16>, ptr %pb
  %vc = add <vscale x 1 x i16> %va, %vb
  store <vscale x 1 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v8, (a1)
; CHECK-NEXT:    vl1re32.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i32>, ptr %pa
  %vb = load <vscale x 2 x i32>, ptr %pb
  %vc = add <vscale x 2 x i32> %va, %vb
  store <vscale x 2 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v8, (a1)
; CHECK-NEXT:    vl2re32.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i32>, ptr %pa
  %vb = load <vscale x 4 x i32>, ptr %pb
  %vc = add <vscale x 4 x i32> %va, %vb
  store <vscale x 4 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v8, (a1)
; CHECK-NEXT:    vl4re32.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i32>, ptr %pa
  %vb = load <vscale x 8 x i32>, ptr %pb
  %vc = add <vscale x 8 x i32> %va, %vb
  store <vscale x 8 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v8, (a1)
; CHECK-NEXT:    vl8re32.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i32>, ptr %pa
  %vb = load <vscale x 16 x i32>, ptr %pb
  %vc = add <vscale x 16 x i32> %va, %vb
  store <vscale x 16 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vle32.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i32>, ptr %pa
  %vb = load <vscale x 1 x i32>, ptr %pb
  %vc = add <vscale x 1 x i32> %va, %vb
  store <vscale x 1 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re64.v v8, (a1)
; CHECK-NEXT:    vl1re64.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i64>, ptr %pa
  %vb = load <vscale x 1 x i64>, ptr %pb
  %vc = add <vscale x 1 x i64> %va, %vb
  store <vscale x 1 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re64.v v8, (a1)
; CHECK-NEXT:    vl2re64.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i64>, ptr %pa
  %vb = load <vscale x 2 x i64>, ptr %pb
  %vc = add <vscale x 2 x i64> %va, %vb
  store <vscale x 2 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re64.v v8, (a1)
; CHECK-NEXT:    vl4re64.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i64>, ptr %pa
  %vb = load <vscale x 4 x i64>, ptr %pb
  %vc = add <vscale x 4 x i64> %va, %vb
  store <vscale x 4 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v8, (a1)
; CHECK-NEXT:    vl8re64.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i64>, ptr %pa
  %vb = load <vscale x 8 x i64>, ptr %pb
  %vc = add <vscale x 8 x i64> %va, %vb
  store <vscale x 8 x i64> %vc, ptr %pc
  ret void
}
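; With vscale_range(2,2), vscale is exactly 2 (VLEN = 128), so the element
; count of each scalable type is a compile-time constant and the AVL can
; usually be encoded as a vsetivli immediate; counts of 32 exceed the 5-bit
; immediate and keep the vsetvli form.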
define void @exact_vlen_vadd_vint8m1(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v8, (a1)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i8>, ptr %pa
  %vb = load <vscale x 8 x i8>, ptr %pb
  %vc = add <vscale x 8 x i8> %va, %vb
  store <vscale x 8 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8m2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v8, (a1)
; CHECK-NEXT:    vl2r.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i8>, ptr %pa
  %vb = load <vscale x 16 x i8>, ptr %pb
  %vc = add <vscale x 16 x i8> %va, %vb
  store <vscale x 16 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i8>, ptr %pa
  %vb = load <vscale x 4 x i8>, ptr %pb
  %vc = add <vscale x 4 x i8> %va, %vb
  store <vscale x 4 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf4(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i8>, ptr %pa
  %vb = load <vscale x 2 x i8>, ptr %pb
  %vc = add <vscale x 2 x i8> %va, %vb
  store <vscale x 2 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf8(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i8>, ptr %pa
  %vb = load <vscale x 1 x i8>, ptr %pb
  %vc = add <vscale x 1 x i8> %va, %vb
  store <vscale x 1 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m1(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v8, (a1)
; CHECK-NEXT:    vl1re32.v v9, (a2)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i32>, ptr %pa
  %vb = load <vscale x 2 x i32>, ptr %pb
  %vc = add <vscale x 2 x i32> %va, %vb
  store <vscale x 2 x i32> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v8, (a1)
; CHECK-NEXT:    vl2re32.v v10, (a2)
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i32>, ptr %pa
  %vb = load <vscale x 4 x i32>, ptr %pb
  %vc = add <vscale x 4 x i32> %va, %vb
  store <vscale x 4 x i32> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m4(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v8, (a1)
; CHECK-NEXT:    vl4re32.v v12, (a2)
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i32>, ptr %pa
  %vb = load <vscale x 8 x i32>, ptr %pb
  %vc = add <vscale x 8 x i32> %va, %vb
  store <vscale x 8 x i32> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m8(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v8, (a1)
; CHECK-NEXT:    vl8re32.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i32>, ptr %pa
  %vb = load <vscale x 16 x i32>, ptr %pb
  %vc = add <vscale x 16 x i32> %va, %vb
  store <vscale x 16 x i32> %vc, ptr %pc
  ret void
}