xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/commutable.ll (revision 26766a00ff946c281b7dd517b2ba8d594012c21e)
1d1493709SPengcheng Wang; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2*26766a00SCraig Topper; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3d1493709SPengcheng Wang; RUN:   -verify-machineinstrs | FileCheck %s
4*26766a00SCraig Topper; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5d1493709SPengcheng Wang; RUN:   -verify-machineinstrs | FileCheck %s
6d1493709SPengcheng Wang
7d1493709SPengcheng Wang; vadd.vv
; The two intrinsic calls below differ only in the order of their vector
; operands. Because vadd is commutable, the CHECK lines show a single
; "vadd.vv v8, v8, v9"; the final "vadd.vv v8, v8, v8" adds that one
; result to itself, proving the commuted duplicate was folded away.
8d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
9d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
10d1493709SPengcheng Wang; CHECK-LABEL: commutable_vadd_vv:
11d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
12d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
13d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v9
14d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
15d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
16d1493709SPengcheng Wang; CHECK-NEXT:    ret
17d1493709SPengcheng Wangentry:
18d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
19d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
20d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
21d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
22d1493709SPengcheng Wang}
23d1493709SPengcheng Wang
; Masked variant: the CHECK lines show both commuted calls canonicalized to
; the same operand order (v8, v9), but they remain two separate vadd.vv
; instructions at this revision (not merged into one).
24d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
25d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
26d1493709SPengcheng Wang; CHECK-LABEL: commutable_vadd_vv_masked:
27d1493709SPengcheng Wang; CHECK:       # %bb.0:
28d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
29d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
30d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
31d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
32d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
33d1493709SPengcheng Wang; CHECK-NEXT:    ret
34d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
35d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
36d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
37d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
38d1493709SPengcheng Wang}
39d1493709SPengcheng Wang
40d1493709SPengcheng Wang; vand.vv
; The two intrinsic calls below differ only in operand order. Because vand
; is commutable, the CHECK lines show a single "vand.vv v8, v8, v9" whose
; result is then added to itself ("vadd.vv v8, v8, v8") — the commuted
; duplicate was folded away.
41d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
42d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vand_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
43d1493709SPengcheng Wang; CHECK-LABEL: commutable_vand_vv:
44d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
45d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
46d1493709SPengcheng Wang; CHECK-NEXT:    vand.vv v8, v8, v9
47d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
48d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
49d1493709SPengcheng Wang; CHECK-NEXT:    ret
50d1493709SPengcheng Wangentry:
51d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
52d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
53d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
54d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
55d1493709SPengcheng Wang}
56d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vand.vv
; instructions at this revision.
57d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
58d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vand_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
59d1493709SPengcheng Wang; CHECK-LABEL: commutable_vand_vv_masked:
60d1493709SPengcheng Wang; CHECK:       # %bb.0:
61d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
62d1493709SPengcheng Wang; CHECK-NEXT:    vand.vv v10, v8, v9, v0.t
63d1493709SPengcheng Wang; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
64d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
65d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
66d1493709SPengcheng Wang; CHECK-NEXT:    ret
67d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
68d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
69d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
70d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
71d1493709SPengcheng Wang}
72d1493709SPengcheng Wang
73d1493709SPengcheng Wang; vor.vv
; The two intrinsic calls below differ only in operand order. Because vor
; is commutable, the CHECK lines show a single "vor.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
74d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
75d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
76d1493709SPengcheng Wang; CHECK-LABEL: commutable_vor_vv:
77d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
78d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
79d1493709SPengcheng Wang; CHECK-NEXT:    vor.vv v8, v8, v9
80d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
81d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
82d1493709SPengcheng Wang; CHECK-NEXT:    ret
83d1493709SPengcheng Wangentry:
84d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
85d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
86d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
87d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
88d1493709SPengcheng Wang}
89d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vor.vv
; instructions at this revision.
90d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
91d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
92d1493709SPengcheng Wang; CHECK-LABEL: commutable_vor_vv_masked:
93d1493709SPengcheng Wang; CHECK:       # %bb.0:
94d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
95d1493709SPengcheng Wang; CHECK-NEXT:    vor.vv v10, v8, v9, v0.t
96d1493709SPengcheng Wang; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
97d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
98d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
99d1493709SPengcheng Wang; CHECK-NEXT:    ret
100d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
101d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
102d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
103d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
104d1493709SPengcheng Wang}
105d1493709SPengcheng Wang
106d1493709SPengcheng Wang; vxor.vv
; The two intrinsic calls below differ only in operand order. Because vxor
; is commutable, the CHECK lines show a single "vxor.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
107d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
108d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vxor_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
109d1493709SPengcheng Wang; CHECK-LABEL: commutable_vxor_vv:
110d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
111d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
112d1493709SPengcheng Wang; CHECK-NEXT:    vxor.vv v8, v8, v9
113d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
114d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
115d1493709SPengcheng Wang; CHECK-NEXT:    ret
116d1493709SPengcheng Wangentry:
117d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
118d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
119d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
120d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
121d1493709SPengcheng Wang}
122d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vxor.vv
; instructions at this revision.
123d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
124d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vxor_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
125d1493709SPengcheng Wang; CHECK-LABEL: commutable_vxor_vv_masked:
126d1493709SPengcheng Wang; CHECK:       # %bb.0:
127d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
128d1493709SPengcheng Wang; CHECK-NEXT:    vxor.vv v10, v8, v9, v0.t
129d1493709SPengcheng Wang; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
130d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
131d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
132d1493709SPengcheng Wang; CHECK-NEXT:    ret
133d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
134d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
135d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
136d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
137d1493709SPengcheng Wang}
138d1493709SPengcheng Wang
139d1493709SPengcheng Wang; vmseq.vv
; The two compares below differ only in operand order. Because vmseq is
; commutable, the CHECK lines show a single "vmseq.vv v8, v8, v9"; the
; "add" of the two <vscale x 1 x i1> results lowers to
; "vmxor.mm v0, v8, v8" (same mask register on both sides).
140d1493709SPengcheng Wangdeclare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
141d1493709SPengcheng Wangdefine <vscale x 1 x i1> @commutable_vmseq_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
142d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmseq_vv:
143d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
144d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
145d1493709SPengcheng Wang; CHECK-NEXT:    vmseq.vv v8, v8, v9
146d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
147d1493709SPengcheng Wang; CHECK-NEXT:    vmxor.mm v0, v8, v8
148d1493709SPengcheng Wang; CHECK-NEXT:    ret
149d1493709SPengcheng Wangentry:
150d1493709SPengcheng Wang  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
151d1493709SPengcheng Wang  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
152d1493709SPengcheng Wang  %ret = add <vscale x 1 x i1> %a, %b
153d1493709SPengcheng Wang  ret <vscale x 1 x i1> %ret
154d1493709SPengcheng Wang}
155d1493709SPengcheng Wang
; Masked variant: both commuted compares are shown with the same operand
; order (v8, v9) in the CHECK lines but are still emitted as two separate
; vmseq.vv instructions at this revision.
156d1493709SPengcheng Wangdeclare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
157d1493709SPengcheng Wangdefine <vscale x 1 x i1> @commutable_vmseq_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
158d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmseq_vv_masked:
159d1493709SPengcheng Wang; CHECK:       # %bb.0:
160d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
161d1493709SPengcheng Wang; CHECK-NEXT:    vmseq.vv v10, v8, v9, v0.t
162d1493709SPengcheng Wang; CHECK-NEXT:    vmseq.vv v8, v8, v9, v0.t
163d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
164d1493709SPengcheng Wang; CHECK-NEXT:    vmxor.mm v0, v10, v8
165d1493709SPengcheng Wang; CHECK-NEXT:    ret
166d1493709SPengcheng Wang  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
167d1493709SPengcheng Wang  %b = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
168d1493709SPengcheng Wang  %ret = add <vscale x 1 x i1> %a, %b
169d1493709SPengcheng Wang  ret <vscale x 1 x i1> %ret
170d1493709SPengcheng Wang}
171d1493709SPengcheng Wang
172d1493709SPengcheng Wang; vmsne.vv
; The two compares below differ only in operand order. Because vmsne is
; commutable, the CHECK lines show a single "vmsne.vv v8, v8, v9"; the
; "add" of the two <vscale x 1 x i1> results lowers to
; "vmxor.mm v0, v8, v8" (same mask register on both sides).
173d1493709SPengcheng Wangdeclare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
174d1493709SPengcheng Wangdefine <vscale x 1 x i1> @commutable_vmsne_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
175d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmsne_vv:
176d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
177d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
178d1493709SPengcheng Wang; CHECK-NEXT:    vmsne.vv v8, v8, v9
179d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
180d1493709SPengcheng Wang; CHECK-NEXT:    vmxor.mm v0, v8, v8
181d1493709SPengcheng Wang; CHECK-NEXT:    ret
182d1493709SPengcheng Wangentry:
183d1493709SPengcheng Wang  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
184d1493709SPengcheng Wang  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(<vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
185d1493709SPengcheng Wang  %ret = add <vscale x 1 x i1> %a, %b
186d1493709SPengcheng Wang  ret <vscale x 1 x i1> %ret
187d1493709SPengcheng Wang}
188d1493709SPengcheng Wang
; Masked variant: both commuted compares are shown with the same operand
; order (v8, v9) in the CHECK lines but are still emitted as two separate
; vmsne.vv instructions at this revision.
189d1493709SPengcheng Wangdeclare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
190d1493709SPengcheng Wangdefine <vscale x 1 x i1> @commutable_vmsne_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
191d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmsne_vv_masked:
192d1493709SPengcheng Wang; CHECK:       # %bb.0:
193d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
194d1493709SPengcheng Wang; CHECK-NEXT:    vmsne.vv v10, v8, v9, v0.t
195d1493709SPengcheng Wang; CHECK-NEXT:    vmsne.vv v8, v8, v9, v0.t
196d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
197d1493709SPengcheng Wang; CHECK-NEXT:    vmxor.mm v0, v10, v8
198d1493709SPengcheng Wang; CHECK-NEXT:    ret
199d1493709SPengcheng Wang  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
200d1493709SPengcheng Wang  %b = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(<vscale x 1 x i1> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
201d1493709SPengcheng Wang  %ret = add <vscale x 1 x i1> %a, %b
202d1493709SPengcheng Wang  ret <vscale x 1 x i1> %ret
203d1493709SPengcheng Wang}
204d1493709SPengcheng Wang
205d1493709SPengcheng Wang; vmin.vv
; The two intrinsic calls below differ only in operand order. Because vmin
; is commutable, the CHECK lines show a single "vmin.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
206d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
207d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmin_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
208d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmin_vv:
209d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
210d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
211d1493709SPengcheng Wang; CHECK-NEXT:    vmin.vv v8, v8, v9
212d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
213d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
214d1493709SPengcheng Wang; CHECK-NEXT:    ret
215d1493709SPengcheng Wangentry:
216d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
217d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
218d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
219d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
220d1493709SPengcheng Wang}
221d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vmin.vv
; instructions at this revision.
222d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
223d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmin_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
224d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmin_vv_masked:
225d1493709SPengcheng Wang; CHECK:       # %bb.0:
226d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
227d1493709SPengcheng Wang; CHECK-NEXT:    vmin.vv v10, v8, v9, v0.t
228d1493709SPengcheng Wang; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
229d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
230d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
231d1493709SPengcheng Wang; CHECK-NEXT:    ret
232d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
233d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
234d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
235d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
236d1493709SPengcheng Wang}
237d1493709SPengcheng Wang
238d1493709SPengcheng Wang; vminu.vv
; The two intrinsic calls below differ only in operand order. Because vminu
; is commutable, the CHECK lines show a single "vminu.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
239d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
240d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vminu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
241d1493709SPengcheng Wang; CHECK-LABEL: commutable_vminu_vv:
242d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
243d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
244d1493709SPengcheng Wang; CHECK-NEXT:    vminu.vv v8, v8, v9
245d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
246d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
247d1493709SPengcheng Wang; CHECK-NEXT:    ret
248d1493709SPengcheng Wangentry:
249d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
250d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
251d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
252d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
253d1493709SPengcheng Wang}
254d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate
; vminu.vv instructions at this revision.
255d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
256d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vminu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
257d1493709SPengcheng Wang; CHECK-LABEL: commutable_vminu_vv_masked:
258d1493709SPengcheng Wang; CHECK:       # %bb.0:
259d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
260d1493709SPengcheng Wang; CHECK-NEXT:    vminu.vv v10, v8, v9, v0.t
261d1493709SPengcheng Wang; CHECK-NEXT:    vminu.vv v8, v8, v9, v0.t
262d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
263d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
264d1493709SPengcheng Wang; CHECK-NEXT:    ret
265d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
266d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
267d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
268d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
269d1493709SPengcheng Wang}
270d1493709SPengcheng Wang
271d1493709SPengcheng Wang; vmax.vv
; The two intrinsic calls below differ only in operand order. Because vmax
; is commutable, the CHECK lines show a single "vmax.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
272d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
273d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmax_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
274d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmax_vv:
275d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
276d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
277d1493709SPengcheng Wang; CHECK-NEXT:    vmax.vv v8, v8, v9
278d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
279d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
280d1493709SPengcheng Wang; CHECK-NEXT:    ret
281d1493709SPengcheng Wangentry:
282d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
283d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
284d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
285d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
286d1493709SPengcheng Wang}
287d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vmax.vv
; instructions at this revision.
288d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
289d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmax_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
290d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmax_vv_masked:
291d1493709SPengcheng Wang; CHECK:       # %bb.0:
292d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
293d1493709SPengcheng Wang; CHECK-NEXT:    vmax.vv v10, v8, v9, v0.t
294d1493709SPengcheng Wang; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
295d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
296d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
297d1493709SPengcheng Wang; CHECK-NEXT:    ret
298d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
299d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
300d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
301d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
302d1493709SPengcheng Wang}
303d1493709SPengcheng Wang
304d1493709SPengcheng Wang; vmaxu.vv
; The two intrinsic calls below differ only in operand order. Because vmaxu
; is commutable, the CHECK lines show a single "vmaxu.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
305d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
306d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmaxu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
307d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmaxu_vv:
308d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
309d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
310d1493709SPengcheng Wang; CHECK-NEXT:    vmaxu.vv v8, v8, v9
311d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
312d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
313d1493709SPengcheng Wang; CHECK-NEXT:    ret
314d1493709SPengcheng Wangentry:
315d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
316d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
317d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
318d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
319d1493709SPengcheng Wang}
320d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate
; vmaxu.vv instructions at this revision.
321d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
322d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmaxu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
323d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmaxu_vv_masked:
324d1493709SPengcheng Wang; CHECK:       # %bb.0:
325d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
326d1493709SPengcheng Wang; CHECK-NEXT:    vmaxu.vv v10, v8, v9, v0.t
327d1493709SPengcheng Wang; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
328d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
329d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
330d1493709SPengcheng Wang; CHECK-NEXT:    ret
331d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
332d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
333d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
334d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
335d1493709SPengcheng Wang}
336d1493709SPengcheng Wang
337d1493709SPengcheng Wang; vmul.vv
; The two intrinsic calls below differ only in operand order. Because vmul
; is commutable, the CHECK lines show a single "vmul.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
338d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
339d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
340d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmul_vv:
341d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
342d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
343d1493709SPengcheng Wang; CHECK-NEXT:    vmul.vv v8, v8, v9
344d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
345d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
346d1493709SPengcheng Wang; CHECK-NEXT:    ret
347d1493709SPengcheng Wangentry:
348d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
349d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
350d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
351d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
352d1493709SPengcheng Wang}
353d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate vmul.vv
; instructions at this revision.
354d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
355d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
356d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmul_vv_masked:
357d1493709SPengcheng Wang; CHECK:       # %bb.0:
358d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
359d1493709SPengcheng Wang; CHECK-NEXT:    vmul.vv v10, v8, v9, v0.t
360d1493709SPengcheng Wang; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
361d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
362d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
363d1493709SPengcheng Wang; CHECK-NEXT:    ret
364d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
365d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
366d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
367d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
368d1493709SPengcheng Wang}
369d1493709SPengcheng Wang
370d1493709SPengcheng Wang; vmulh.vv
; The two intrinsic calls below differ only in operand order. Because vmulh
; is commutable, the CHECK lines show a single "vmulh.vv v8, v8, v9" whose
; result is then added to itself — the commuted duplicate was folded away.
371d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
372d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmulh_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
373d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmulh_vv:
374d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
375d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
376d1493709SPengcheng Wang; CHECK-NEXT:    vmulh.vv v8, v8, v9
377d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
378d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
379d1493709SPengcheng Wang; CHECK-NEXT:    ret
380d1493709SPengcheng Wangentry:
381d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
382d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
383d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
384d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
385d1493709SPengcheng Wang}
386d1493709SPengcheng Wang
; Masked variant: both commuted calls are shown with the same operand order
; (v8, v9) in the CHECK lines but are still emitted as two separate
; vmulh.vv instructions at this revision.
387d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
388d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmulh_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
389d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmulh_vv_masked:
390d1493709SPengcheng Wang; CHECK:       # %bb.0:
391d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
392d1493709SPengcheng Wang; CHECK-NEXT:    vmulh.vv v10, v8, v9, v0.t
393d1493709SPengcheng Wang; CHECK-NEXT:    vmulh.vv v8, v8, v9, v0.t
394d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
395d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
396d1493709SPengcheng Wang; CHECK-NEXT:    ret
397d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
398d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
399d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
400d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
401d1493709SPengcheng Wang}
402d1493709SPengcheng Wang
403d1493709SPengcheng Wang; vmulhu.vv
404d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
405d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmulhu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
406d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmulhu_vv:
407d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
408d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
409d1493709SPengcheng Wang; CHECK-NEXT:    vmulhu.vv v8, v8, v9
410d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
411d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
412d1493709SPengcheng Wang; CHECK-NEXT:    ret
413d1493709SPengcheng Wangentry:
414d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
415d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
416d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
417d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
418d1493709SPengcheng Wang}
419d1493709SPengcheng Wang
420d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
421d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vmulhu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
422d1493709SPengcheng Wang; CHECK-LABEL: commutable_vmulhu_vv_masked:
423d1493709SPengcheng Wang; CHECK:       # %bb.0:
424d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
425d1493709SPengcheng Wang; CHECK-NEXT:    vmulhu.vv v10, v8, v9, v0.t
426d1493709SPengcheng Wang; CHECK-NEXT:    vmulhu.vv v8, v8, v9, v0.t
427d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
428d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
429d1493709SPengcheng Wang; CHECK-NEXT:    ret
430d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
431d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
432d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
433d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
434d1493709SPengcheng Wang}
435d1493709SPengcheng Wang
436d1493709SPengcheng Wang; vwadd.vv
; Widening add: the commuted calls fold into a single vwadd (result reused for
; both operands of the final vadd).
437d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
438d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwadd_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
439d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwadd_vv:
440d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
441d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
442d1493709SPengcheng Wang; CHECK-NEXT:    vwadd.vv v10, v8, v9
443d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
444d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
445d1493709SPengcheng Wang; CHECK-NEXT:    ret
446d1493709SPengcheng Wangentry:
447d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
448d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
449d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
450d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
451d1493709SPengcheng Wang}
452d1493709SPengcheng Wang
453d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
; Masked form: the CHECK lines show both masked vwadd instructions kept, with
; the second call's operands commuted to the same v8/v9 order.
454d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwadd_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
455d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwadd_vv_masked:
456d1493709SPengcheng Wang; CHECK:       # %bb.0:
457d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
458d1493709SPengcheng Wang; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
459d1493709SPengcheng Wang; CHECK-NEXT:    vwadd.vv v11, v8, v9, v0.t
460d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
461d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
462d1493709SPengcheng Wang; CHECK-NEXT:    ret
463d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
464d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
465d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
466d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
467d1493709SPengcheng Wang}
468d1493709SPengcheng Wang
469d1493709SPengcheng Wang; vwaddu.vv
; Same commutation checks as vwadd.vv above, for the unsigned widening add.
470d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
471d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwaddu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
472d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwaddu_vv:
473d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
474d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
475d1493709SPengcheng Wang; CHECK-NEXT:    vwaddu.vv v10, v8, v9
476d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
477d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
478d1493709SPengcheng Wang; CHECK-NEXT:    ret
479d1493709SPengcheng Wangentry:
480d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
481d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
482d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
483d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
484d1493709SPengcheng Wang}
485d1493709SPengcheng Wang
486d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
487d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwaddu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
488d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwaddu_vv_masked:
489d1493709SPengcheng Wang; CHECK:       # %bb.0:
490d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
491d1493709SPengcheng Wang; CHECK-NEXT:    vwaddu.vv v10, v8, v9, v0.t
492d1493709SPengcheng Wang; CHECK-NEXT:    vwaddu.vv v11, v8, v9, v0.t
493d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
494d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
495d1493709SPengcheng Wang; CHECK-NEXT:    ret
496d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
497d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
498d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
499d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
500d1493709SPengcheng Wang}
501d1493709SPengcheng Wang
502d1493709SPengcheng Wang; vwmul.vv
; Widening multiply: the commuted calls fold into a single vwmul; the masked
; variants below keep two instructions but with commuted (matching) operands.
503d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
504d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmul_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
505d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmul_vv:
506d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
507d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
508d1493709SPengcheng Wang; CHECK-NEXT:    vwmul.vv v10, v8, v9
509d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
510d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
511d1493709SPengcheng Wang; CHECK-NEXT:    ret
512d1493709SPengcheng Wangentry:
513d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
514d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
515d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
516d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
517d1493709SPengcheng Wang}
518d1493709SPengcheng Wang
519d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
520d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmul_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
521d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmul_vv_masked:
522d1493709SPengcheng Wang; CHECK:       # %bb.0:
523d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
524d1493709SPengcheng Wang; CHECK-NEXT:    vwmul.vv v10, v8, v9, v0.t
525d1493709SPengcheng Wang; CHECK-NEXT:    vwmul.vv v11, v8, v9, v0.t
526d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
527d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
528d1493709SPengcheng Wang; CHECK-NEXT:    ret
529d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
530d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
531d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
532d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
533d1493709SPengcheng Wang}
534d1493709SPengcheng Wang
535d1493709SPengcheng Wang; vwmulu.vv
; Same commutation checks as vwmul.vv above, for the unsigned widening multiply.
536d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen);
537d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmulu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
538d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmulu_vv:
539d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
540d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
541d1493709SPengcheng Wang; CHECK-NEXT:    vwmulu.vv v10, v8, v9
542d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
543d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
544d1493709SPengcheng Wang; CHECK-NEXT:    ret
545d1493709SPengcheng Wangentry:
546d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2)
547d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2)
548d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
549d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
550d1493709SPengcheng Wang}
551d1493709SPengcheng Wang
552d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
553d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmulu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
554d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmulu_vv_masked:
555d1493709SPengcheng Wang; CHECK:       # %bb.0:
556d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
557d1493709SPengcheng Wang; CHECK-NEXT:    vwmulu.vv v10, v8, v9, v0.t
558d1493709SPengcheng Wang; CHECK-NEXT:    vwmulu.vv v11, v8, v9, v0.t
559d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
560d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
561d1493709SPengcheng Wang; CHECK-NEXT:    ret
562d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
563d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
564d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
565d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
566d1493709SPengcheng Wang}
567d1493709SPengcheng Wang
568d1493709SPengcheng Wang; vwmacc.vv
; Widening multiply-accumulate: the commuted unmasked calls fold to a single
; vwmacc. Note the intrinsic takes an extra policy operand (trailing iXLen 1).
569d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
570d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmacc_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
571d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmacc_vv:
572d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
573d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
574d1493709SPengcheng Wang; CHECK-NEXT:    vwmacc.vv v10, v8, v9
575d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
576d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
577d1493709SPengcheng Wang; CHECK-NEXT:    ret
578d1493709SPengcheng Wangentry:
579d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
580d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
581d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
582d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
583d1493709SPengcheng Wang}
584d1493709SPengcheng Wang
585d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
; Masked form: per the CHECK lines the second vwmacc keeps the original
; (v9, v8) operand order — the masked form is not commuted here.
586d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmacc_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
587d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmacc_vv_masked:
588d1493709SPengcheng Wang; CHECK:       # %bb.0:
589d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
590d1493709SPengcheng Wang; CHECK-NEXT:    vwmacc.vv v10, v8, v9, v0.t
591d1493709SPengcheng Wang; CHECK-NEXT:    vwmacc.vv v11, v9, v8, v0.t
592d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
593d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
594d1493709SPengcheng Wang; CHECK-NEXT:    ret
595d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
596d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
597d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
598d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
599d1493709SPengcheng Wang}
600d1493709SPengcheng Wang
601d1493709SPengcheng Wang; vwmaccu.vv
; Same commutation checks as vwmacc.vv above, for the unsigned variant; the
; masked form likewise keeps the original (v9, v8) operand order.
602d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen);
603d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmaccu_vv(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
604d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmaccu_vv:
605d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
606d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
607d1493709SPengcheng Wang; CHECK-NEXT:    vwmaccu.vv v10, v8, v9
608d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
609d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v10
610d1493709SPengcheng Wang; CHECK-NEXT:    ret
611d1493709SPengcheng Wangentry:
612d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, iXLen 1)
613d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, iXLen %2, iXLen 1)
614d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
615d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
616d1493709SPengcheng Wang}
617d1493709SPengcheng Wang
618d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
619d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vwmaccu_vv_masked(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2) {
620d1493709SPengcheng Wang; CHECK-LABEL: commutable_vwmaccu_vv_masked:
621d1493709SPengcheng Wang; CHECK:       # %bb.0:
622d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
623d1493709SPengcheng Wang; CHECK-NEXT:    vwmaccu.vv v10, v8, v9, v0.t
624d1493709SPengcheng Wang; CHECK-NEXT:    vwmaccu.vv v11, v9, v8, v0.t
625d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
626d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v11
627d1493709SPengcheng Wang; CHECK-NEXT:    ret
628d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
629d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i32> %1, <vscale x 1 x i32> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
630d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
631d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
632d1493709SPengcheng Wang}
633d1493709SPengcheng Wang
634d1493709SPengcheng Wang; vadc.vvm
; Add-with-carry takes the carry mask as a regular operand (v0). Per the CHECK
; lines the second call's operands are commuted to match (both read v8/v9),
; but the two vadc.vvm instructions remain.
635d1493709SPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen);
636d1493709SPengcheng Wangdefine <vscale x 1 x i64> @commutable_vadc_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) nounwind {
637d1493709SPengcheng Wang; CHECK-LABEL: commutable_vadc_vv:
638d1493709SPengcheng Wang; CHECK:       # %bb.0: # %entry
639d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
640d1493709SPengcheng Wang; CHECK-NEXT:    vadc.vvm v10, v8, v9, v0
641d1493709SPengcheng Wang; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
642d1493709SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
643d1493709SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
644d1493709SPengcheng Wang; CHECK-NEXT:    ret
645d1493709SPengcheng Wangentry:
646d1493709SPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2)
647d1493709SPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2)
648d1493709SPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
649d1493709SPengcheng Wang  ret <vscale x 1 x i64> %ret
650d1493709SPengcheng Wang}
651d1493709SPengcheng Wang
6522c1c887cSPengcheng Wang; vsadd.vv
; Saturating add: the commuted calls fold into a single vsadd; the masked form
; commutes operands to match but keeps both masked instructions.
6532c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
6542c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
6552c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vsadd_vv:
6562c1c887cSPengcheng Wang; CHECK:       # %bb.0: # %entry
6572c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
658940ef968SPengcheng Wang; CHECK-NEXT:    vsadd.vv v8, v8, v9
6592c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
660940ef968SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
6612c1c887cSPengcheng Wang; CHECK-NEXT:    ret
6622c1c887cSPengcheng Wangentry:
6632c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
6642c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
6652c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
6662c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
6672c1c887cSPengcheng Wang}
6682c1c887cSPengcheng Wang
6692c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
6702c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
6712c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vsadd_vv_masked:
6722c1c887cSPengcheng Wang; CHECK:       # %bb.0:
6732c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
6742c1c887cSPengcheng Wang; CHECK-NEXT:    vsadd.vv v10, v8, v9, v0.t
675940ef968SPengcheng Wang; CHECK-NEXT:    vsadd.vv v8, v8, v9, v0.t
6762c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
6772c1c887cSPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
6782c1c887cSPengcheng Wang; CHECK-NEXT:    ret
6792c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
6802c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
6812c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
6822c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
6832c1c887cSPengcheng Wang}
6842c1c887cSPengcheng Wang
6852c1c887cSPengcheng Wang; vsaddu.vv
; Same commutation checks as vsadd.vv above, for the unsigned saturating add.
6862c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
6872c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
6882c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vsaddu_vv:
6892c1c887cSPengcheng Wang; CHECK:       # %bb.0: # %entry
6902c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
691940ef968SPengcheng Wang; CHECK-NEXT:    vsaddu.vv v8, v8, v9
6922c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
693940ef968SPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
6942c1c887cSPengcheng Wang; CHECK-NEXT:    ret
6952c1c887cSPengcheng Wangentry:
6962c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
6972c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
6982c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
6992c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
7002c1c887cSPengcheng Wang}
7012c1c887cSPengcheng Wang
7022c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
7032c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
7042c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vsaddu_vv_masked:
7052c1c887cSPengcheng Wang; CHECK:       # %bb.0:
7062c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
7072c1c887cSPengcheng Wang; CHECK-NEXT:    vsaddu.vv v10, v8, v9, v0.t
708940ef968SPengcheng Wang; CHECK-NEXT:    vsaddu.vv v8, v8, v9, v0.t
7092c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
7102c1c887cSPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
7112c1c887cSPengcheng Wang; CHECK-NEXT:    ret
7122c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
7132c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
7142c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
7152c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
7162c1c887cSPengcheng Wang}
7172c1c887cSPengcheng Wang
7182c1c887cSPengcheng Wang; vaadd.vv
; Averaging add: both calls use rounding mode 0 (iXLen 0, materialized by the
; csrwi vxrm write in the CHECK lines); the commuted calls fold to one vaadd.
7192c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
7202c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
7212c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vaadd_vv:
7222c1c887cSPengcheng Wang; CHECK:       # %bb.0: # %entry
7232c1c887cSPengcheng Wang; CHECK-NEXT:    csrwi vxrm, 0
7240ebe48f0SLuke Lau; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
7252c1c887cSPengcheng Wang; CHECK-NEXT:    vaadd.vv v8, v8, v9
7262c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
7272c1c887cSPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
7282c1c887cSPengcheng Wang; CHECK-NEXT:    ret
7292c1c887cSPengcheng Wangentry:
7302c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
7312c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
7322c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
7332c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
7342c1c887cSPengcheng Wang}
7352c1c887cSPengcheng Wang
7362c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
; Masked form: operands are commuted to match (both read v8/v9), but the two
; masked vaadd instructions remain.
7372c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
7382c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vaadd_vv_masked:
7392c1c887cSPengcheng Wang; CHECK:       # %bb.0:
7402c1c887cSPengcheng Wang; CHECK-NEXT:    csrwi vxrm, 0
7410ebe48f0SLuke Lau; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
7422c1c887cSPengcheng Wang; CHECK-NEXT:    vaadd.vv v10, v8, v9, v0.t
7432c1c887cSPengcheng Wang; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
7442c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
7452c1c887cSPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v10, v8
7462c1c887cSPengcheng Wang; CHECK-NEXT:    ret
7472c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
7482c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
7492c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
7502c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
7512c1c887cSPengcheng Wang}
7522c1c887cSPengcheng Wang
7532c1c887cSPengcheng Wang; vaaddu.vv
; Same commutation checks as vaadd.vv above, for the unsigned averaging add.
7542c1c887cSPengcheng Wangdeclare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
7552c1c887cSPengcheng Wangdefine <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
7562c1c887cSPengcheng Wang; CHECK-LABEL: commutable_vaaddu_vv:
7572c1c887cSPengcheng Wang; CHECK:       # %bb.0: # %entry
7582c1c887cSPengcheng Wang; CHECK-NEXT:    csrwi vxrm, 0
7590ebe48f0SLuke Lau; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
7602c1c887cSPengcheng Wang; CHECK-NEXT:    vaaddu.vv v8, v8, v9
7612c1c887cSPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
7622c1c887cSPengcheng Wang; CHECK-NEXT:    vadd.vv v8, v8, v8
7632c1c887cSPengcheng Wang; CHECK-NEXT:    ret
7642c1c887cSPengcheng Wangentry:
7652c1c887cSPengcheng Wang  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
7662c1c887cSPengcheng Wang  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
7672c1c887cSPengcheng Wang  %ret = add <vscale x 1 x i64> %a, %b
7682c1c887cSPengcheng Wang  ret <vscale x 1 x i64> %ret
7692c1c887cSPengcheng Wang}
7702c1c887cSPengcheng Wang
; vaaddu.vv (masked)
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
; Masked variant: the two calls (with swapped vector operands) are each
; selected with operand order (v8, v9), showing the masked pseudo is
; recognized as commutable, but the two instructions are not merged here.
define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vaaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaaddu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}
7872c1c887cSPengcheng Wang
; vsmul.vv
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
; Two calls to the unmasked vsmul intrinsic with swapped vector operands
; collapse to a single vsmul.vv followed by `vadd.vv v8, v8, v8`: the
; backend commutes one call so the two become identical and are merged.
define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}
8052c1c887cSPengcheng Wang
; vsmul.vv (masked)
declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
; Masked variant: the two calls (with swapped vector operands) are each
; selected with operand order (v8, v9), showing the masked pseudo is
; recognized as commutable, but the two instructions are not merged here.
define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}
822