; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
; RUN:     -verify-machineinstrs | FileCheck %s
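
; Check that a simple load/add/store sequence on each scalable vector type
; selects the expected code: whole register loads/stores (vl<N>r.v/vs<N>r.v)
; for types with LMUL >= 1, and unit-stride vle/vse under a VLMAX vsetvli for
; fractional LMUL types.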

define void @vadd_vint8m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v8, (a1)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i8>, ptr %pa
  %vb = load <vscale x 8 x i8>, ptr %pb
  %vc = add <vscale x 8 x i8> %va, %vb
  store <vscale x 8 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v8, (a1)
; CHECK-NEXT:    vl2r.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i8>, ptr %pa
  %vb = load <vscale x 16 x i8>, ptr %pb
  %vc = add <vscale x 16 x i8> %va, %vb
  store <vscale x 16 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v8, (a1)
; CHECK-NEXT:    vl4r.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i8>, ptr %pa
  %vb = load <vscale x 32 x i8>, ptr %pb
  %vc = add <vscale x 32 x i8> %va, %vb
  store <vscale x 32 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v8, (a1)
; CHECK-NEXT:    vl8r.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 64 x i8>, ptr %pa
  %vb = load <vscale x 64 x i8>, ptr %pb
  %vc = add <vscale x 64 x i8> %va, %vb
  store <vscale x 64 x i8> %vc, ptr %pc
  ret void
}

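; Fractional LMUL types occupy only part of a vector register, so the whole
; register load/store instructions cannot be used; the lowering falls back to
; unit-stride vle/vse with vl set to VLMAX.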
define void @vadd_vint8mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i8>, ptr %pa
  %vb = load <vscale x 4 x i8>, ptr %pb
  %vc = add <vscale x 4 x i8> %va, %vb
  store <vscale x 4 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i8>, ptr %pa
  %vb = load <vscale x 2 x i8>, ptr %pb
  %vc = add <vscale x 2 x i8> %va, %vb
  store <vscale x 2 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint8mf8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint8mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i8>, ptr %pa
  %vb = load <vscale x 1 x i8>, ptr %pb
  %vc = add <vscale x 1 x i8> %va, %vb
  store <vscale x 1 x i8> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v8, (a1)
; CHECK-NEXT:    vl1re16.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i16>, ptr %pa
  %vb = load <vscale x 4 x i16>, ptr %pb
  %vc = add <vscale x 4 x i16> %va, %vb
  store <vscale x 4 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v8, (a1)
; CHECK-NEXT:    vl2re16.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i16>, ptr %pa
  %vb = load <vscale x 8 x i16>, ptr %pb
  %vc = add <vscale x 8 x i16> %va, %vb
  store <vscale x 8 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v8, (a1)
; CHECK-NEXT:    vl4re16.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i16>, ptr %pa
  %vb = load <vscale x 16 x i16>, ptr %pb
  %vc = add <vscale x 16 x i16> %va, %vb
  store <vscale x 16 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re16.v v8, (a1)
; CHECK-NEXT:    vl8re16.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 32 x i16>, ptr %pa
  %vb = load <vscale x 32 x i16>, ptr %pb
  %vc = add <vscale x 32 x i16> %va, %vb
  store <vscale x 32 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i16>, ptr %pa
  %vb = load <vscale x 2 x i16>, ptr %pb
  %vc = add <vscale x 2 x i16> %va, %vb
  store <vscale x 2 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint16mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint16mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i16>, ptr %pa
  %vb = load <vscale x 1 x i16>, ptr %pb
  %vc = add <vscale x 1 x i16> %va, %vb
  store <vscale x 1 x i16> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v8, (a1)
; CHECK-NEXT:    vl1re32.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i32>, ptr %pa
  %vb = load <vscale x 2 x i32>, ptr %pb
  %vc = add <vscale x 2 x i32> %va, %vb
  store <vscale x 2 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v8, (a1)
; CHECK-NEXT:    vl2re32.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i32>, ptr %pa
  %vb = load <vscale x 4 x i32>, ptr %pb
  %vc = add <vscale x 4 x i32> %va, %vb
  store <vscale x 4 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v8, (a1)
; CHECK-NEXT:    vl4re32.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i32>, ptr %pa
  %vb = load <vscale x 8 x i32>, ptr %pb
  %vc = add <vscale x 8 x i32> %va, %vb
  store <vscale x 8 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v8, (a1)
; CHECK-NEXT:    vl8re32.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i32>, ptr %pa
  %vb = load <vscale x 16 x i32>, ptr %pb
  %vc = add <vscale x 16 x i32> %va, %vb
  store <vscale x 16 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint32mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint32mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vle32.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i32>, ptr %pa
  %vb = load <vscale x 1 x i32>, ptr %pb
  %vc = add <vscale x 1 x i32> %va, %vb
  store <vscale x 1 x i32> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re64.v v8, (a1)
; CHECK-NEXT:    vl1re64.v v9, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i64>, ptr %pa
  %vb = load <vscale x 1 x i64>, ptr %pb
  %vc = add <vscale x 1 x i64> %va, %vb
  store <vscale x 1 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re64.v v8, (a1)
; CHECK-NEXT:    vl2re64.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i64>, ptr %pa
  %vb = load <vscale x 2 x i64>, ptr %pb
  %vc = add <vscale x 2 x i64> %va, %vb
  store <vscale x 2 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re64.v v8, (a1)
; CHECK-NEXT:    vl4re64.v v12, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i64>, ptr %pa
  %vb = load <vscale x 4 x i64>, ptr %pb
  %vc = add <vscale x 4 x i64> %va, %vb
  store <vscale x 4 x i64> %vc, ptr %pc
  ret void
}

define void @vadd_vint64m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
; CHECK-LABEL: vadd_vint64m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v8, (a1)
; CHECK-NEXT:    vl8re64.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i64>, ptr %pa
  %vb = load <vscale x 8 x i64>, ptr %pb
  %vc = add <vscale x 8 x i64> %va, %vb
  store <vscale x 8 x i64> %vc, ptr %pc
  ret void
}


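; The exact_vlen_* variants carry vscale_range(2,2), i.e. VLEN is known to be
; exactly 128 bits. With a known VLEN, VLMAX is a compile-time constant, so
; vsetivli with an immediate AVL can be used whenever VLMAX fits the 5-bit
; immediate (i.e. VLMAX <= 31).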
define void @exact_vlen_vadd_vint8m1(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v8, (a1)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i8>, ptr %pa
  %vb = load <vscale x 8 x i8>, ptr %pb
  %vc = add <vscale x 8 x i8> %va, %vb
  store <vscale x 8 x i8> %vc, ptr %pc
  ret void
}

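; At VLEN=128, VLMAX for e8/m2 is 32, which does not fit vsetivli's 5-bit
; immediate, so this case still uses vsetvli with AVL=VLMAX.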
define void @exact_vlen_vadd_vint8m2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v8, (a1)
; CHECK-NEXT:    vl2r.v v10, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i8>, ptr %pa
  %vb = load <vscale x 16 x i8>, ptr %pb
  %vc = add <vscale x 16 x i8> %va, %vb
  store <vscale x 16 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i8>, ptr %pa
  %vb = load <vscale x 4 x i8>, ptr %pb
  %vc = add <vscale x 4 x i8> %va, %vb
  store <vscale x 4 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf4(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i8>, ptr %pa
  %vb = load <vscale x 2 x i8>, ptr %pb
  %vc = add <vscale x 2 x i8> %va, %vb
  store <vscale x 2 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint8mf8(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint8mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vle8.v v9, (a2)
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 1 x i8>, ptr %pa
  %vb = load <vscale x 1 x i8>, ptr %pb
  %vc = add <vscale x 1 x i8> %va, %vb
  store <vscale x 1 x i8> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m1(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v8, (a1)
; CHECK-NEXT:    vl1re32.v v9, (a2)
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 2 x i32>, ptr %pa
  %vb = load <vscale x 2 x i32>, ptr %pb
  %vc = add <vscale x 2 x i32> %va, %vb
  store <vscale x 2 x i32> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m2(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v8, (a1)
; CHECK-NEXT:    vl2re32.v v10, (a2)
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 4 x i32>, ptr %pa
  %vb = load <vscale x 4 x i32>, ptr %pb
  %vc = add <vscale x 4 x i32> %va, %vb
  store <vscale x 4 x i32> %vc, ptr %pc
  ret void
}

define void @exact_vlen_vadd_vint32m4(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v8, (a1)
; CHECK-NEXT:    vl4re32.v v12, (a2)
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 8 x i32>, ptr %pa
  %vb = load <vscale x 8 x i32>, ptr %pb
  %vc = add <vscale x 8 x i32> %va, %vb
  store <vscale x 8 x i32> %vc, ptr %pc
  ret void
}

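; Likewise, at VLEN=128 VLMAX for e32/m8 is 32, too large for vsetivli's
; immediate, so vsetvli is used.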
define void @exact_vlen_vadd_vint32m8(ptr %pc, ptr %pa, ptr %pb) nounwind vscale_range(2,2) {
; CHECK-LABEL: exact_vlen_vadd_vint32m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v8, (a1)
; CHECK-NEXT:    vl8re32.v v16, (a2)
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %va = load <vscale x 16 x i32>, ptr %pa
  %vb = load <vscale x 16 x i32>, ptr %pb
  %vc = add <vscale x 16 x i32> %va, %vb
  store <vscale x 16 x i32> %vc, ptr %pc
  ret void
}