; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

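; These tests exercise operand commuting for the RVV intrinsics below: each
; function calls the same intrinsic twice with its vector operands swapped and
; adds the results, so a commutable instruction can be brought into a
; canonical operand order (and, as the unmasked CHECK lines show, folded into
; a single instruction).
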
; vadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vand.vv
declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vand_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vand_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vand_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vand_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vor.vv
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vor_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vor_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vor.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vxor.vv
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vxor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vxor_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vxor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vxor_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmseq.vv
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i1> @commutable_vmseq_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmseq_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i1> @commutable_vmseq_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmseq_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmseq.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

; vmsne.vv
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i1> @commutable_vmsne_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmsne_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i1> @commutable_vmsne_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmsne_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmsne.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i1> %a, %b
  ret <vscale x 1 x i1> %ret
}

; vmin.vv
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmin_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmin_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmin_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmin_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vminu.vv
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vminu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vminu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vminu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vminu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vminu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vminu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmax.vv
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmax_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmax_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmax_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmax_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmax.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmaxu.vv
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmaxu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmaxu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmaxu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmaxu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmul.vv
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmulh.vv
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmulh_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmulh_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmulh_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmulh_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulh.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmulh.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vmulhu.vv
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vmulhu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vmulhu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vmulhu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vmulhu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmulhu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwadd_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwadd_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwadd.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwaddu.vv
declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwaddu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwaddu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwaddu.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwmul.vv
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwmul_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmul_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmul.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwmulu.vv
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
define <vscale x 1 x i64> @commutable_vwmulu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmulu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmulu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmulu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmulu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmulu.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwmacc.vv
declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmacc_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmacc_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmacc.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmacc_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmacc_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmacc.vv v11, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vwmaccu.vv
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmaccu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vwmaccu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmaccu.vv v10, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vwmaccu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vwmaccu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwmaccu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vwmaccu.vv v11, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v11
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vadc.vvm
declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
define <vscale x 1 x i64> @commutable_vadc_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vadc_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vadc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vsadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vsaddu.vv
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsaddu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vaadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaadd.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vaaddu.vv
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaaddu.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaaddu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vsmul.vv
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}
