; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s

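; Operand order for the unmasked intrinsics below is: passthru, op1, op2, vl.
; The masked variant additionally takes a mask and a trailing policy operand.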
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32, i32)

declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

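; The chain x + (x + (x + y)) should be reassociated into (x + x) + (x + y),
; shortening the critical path, as the CHECK lines verify.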
define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v9, v8, v9
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)

  ret <vscale x 1 x i8> %c
}

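; Mixed chain: x + (x + (x - y)) should likewise become (x + x) + (x - y).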
define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vsub_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vv v9, v8, v9
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)

  ret <vscale x 1 x i8> %c
}

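; The same reassociation applies to multiplies: x * (x * (x * y)) becomes
; (x * x) * (x * y).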
define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v9, v8, v9
; CHECK-NEXT:    vmul.vv v8, v8, v8
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  %b = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)

  %c = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)

  ret <vscale x 1 x i8> %c
}

; With passthru and masks.
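; All three adds use %0 as the passthru, so the chain can still be
; reassociated; the tu policy and the vmv1r.v copies keep the passthru
; elements intact.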
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vadd.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v8
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)

  ret <vscale x 1 x i8> %c
}

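; Negative test: the middle add uses a different passthru (%1), so the chain
; must be left in its original order.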
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vadd.vv v10, v8, v9
; CHECK-NEXT:    vadd.vv v9, v8, v10
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)

  ret <vscale x 1 x i8> %c
}

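; All three masked adds use the same mask %m and passthru %0, so they can
; still be reassociated; policy operand 1 matches the ta, mu vsetvli in the
; CHECK lines.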
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v8, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)

  ret <vscale x 1 x i8> %c
}

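; Negative test: the last add uses a different mask (%m2), so no
; reassociation is expected.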
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vadd.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v11, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)

  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)

  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m2,
    i32 %2, i32 1)

  ret <vscale x 1 x i8> %c
}