; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll (revision ae7751f4050d5cbd3552adbcf9958600072d37ed)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN:  sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
; RUN:    -verify-machineinstrs | FileCheck %s
; RUN:  sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
; RUN:    -verify-machineinstrs | FileCheck %s

; Side-effect-only VCIX tests: llvm.riscv.sf.vc.vvv.se lowers to `sf.vc.vvv`
; with no IR result. One test per SEW/LMUL combination (e8mf8 .. e64m8); for
; the m8 cases vs1 arrives on the stack, so a vl8r load into v24 is expected.
; (Fix: stripped source line numbers fused onto each line by the xref
; extraction, which made the file invalid LLVM IR.)
define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
296
; Value-returning VCIX tests: llvm.riscv.sf.vc.v.vvv.se lowers to
; `sf.vc.v.vvv` whose result overwrites vd, so codegen uses a tail-undisturbed
; (tu) vsetvli. One test per SEW/LMUL combination (e8mf8 .. e64m8).
; (Fix: stripped source line numbers fused onto each line by the xref
; extraction, which made the file invalid LLVM IR.)
define <vscale x 1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)

define <vscale x 1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
586
; Section: value-returning sf.vc.v.vvv intrinsics WITHOUT the .se (side-effect)
; marker, one test per SEW/LMUL combination below. Unlike the void .se tests
; at the top of the file (which select "ta, ma"), these return the updated vd,
; and the generated vsetvli uses the tail-undisturbed ("tu") policy — vd is
; presumably treated as a merge/passthru operand here; the CHECK lines above
; and below consistently show "tu, ma" for this group.
define <vscale x 1 x i8> @test_sf_vc_v_vvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
599
600define <vscale x 2 x i8> @test_sf_vc_v_vvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
601; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
602; CHECK:       # %bb.0: # %entry
603; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
604; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
605; CHECK-NEXT:    ret
606entry:
607  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
608  ret <vscale x 2 x i8> %0
609}
610
611declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
612
613define <vscale x 4 x i8> @test_sf_vc_v_vvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
614; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
615; CHECK:       # %bb.0: # %entry
616; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
617; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
618; CHECK-NEXT:    ret
619entry:
620  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
621  ret <vscale x 4 x i8> %0
622}
623
624declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
625
626define <vscale x 8 x i8> @test_sf_vc_v_vvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
627; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
628; CHECK:       # %bb.0: # %entry
629; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
630; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
631; CHECK-NEXT:    ret
632entry:
633  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
634  ret <vscale x 8 x i8> %0
635}
636
637declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
638
639define <vscale x 16 x i8> @test_sf_vc_v_vvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
640; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
641; CHECK:       # %bb.0: # %entry
642; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
643; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
644; CHECK-NEXT:    ret
645entry:
646  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
647  ret <vscale x 16 x i8> %0
648}
649
650declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
651
652define <vscale x 32 x i8> @test_sf_vc_v_vvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
653; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
654; CHECK:       # %bb.0: # %entry
655; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
656; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
657; CHECK-NEXT:    ret
658entry:
659  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
660  ret <vscale x 32 x i8> %0
661}
662
663declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
664
665define <vscale x 64 x i8> @test_sf_vc_v_vvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
666; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
667; CHECK:       # %bb.0: # %entry
668; CHECK-NEXT:    vl8r.v v24, (a0)
669; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
670; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
671; CHECK-NEXT:    ret
672entry:
673  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
674  ret <vscale x 64 x i8> %0
675}
676
677declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
678
679define <vscale x 1 x i16> @test_sf_vc_v_vvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
680; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
681; CHECK:       # %bb.0: # %entry
682; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
683; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
684; CHECK-NEXT:    ret
685entry:
686  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
687  ret <vscale x 1 x i16> %0
688}
689
690declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
691
692define <vscale x 2 x i16> @test_sf_vc_v_vvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
693; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
694; CHECK:       # %bb.0: # %entry
695; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
696; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
697; CHECK-NEXT:    ret
698entry:
699  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
700  ret <vscale x 2 x i16> %0
701}
702
703declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
704
705define <vscale x 4 x i16> @test_sf_vc_v_vvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
706; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
707; CHECK:       # %bb.0: # %entry
708; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
709; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
710; CHECK-NEXT:    ret
711entry:
712  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
713  ret <vscale x 4 x i16> %0
714}
715
716declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
717
718define <vscale x 8 x i16> @test_sf_vc_v_vvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
719; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
720; CHECK:       # %bb.0: # %entry
721; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
722; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
723; CHECK-NEXT:    ret
724entry:
725  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
726  ret <vscale x 8 x i16> %0
727}
728
729declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
730
731define <vscale x 16 x i16> @test_sf_vc_v_vvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
732; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
733; CHECK:       # %bb.0: # %entry
734; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
735; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
736; CHECK-NEXT:    ret
737entry:
738  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
739  ret <vscale x 16 x i16> %0
740}
741
742declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
743
744define <vscale x 32 x i16> @test_sf_vc_v_vvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
745; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
746; CHECK:       # %bb.0: # %entry
747; CHECK-NEXT:    vl8re16.v v24, (a0)
748; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
749; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
750; CHECK-NEXT:    ret
751entry:
752  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
753  ret <vscale x 32 x i16> %0
754}
755
756declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
757
758define <vscale x 1 x i32> @test_sf_vc_v_vvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
759; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
760; CHECK:       # %bb.0: # %entry
761; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
762; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
763; CHECK-NEXT:    ret
764entry:
765  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
766  ret <vscale x 1 x i32> %0
767}
768
769declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
770
771define <vscale x 2 x i32> @test_sf_vc_v_vvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
772; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
773; CHECK:       # %bb.0: # %entry
774; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
775; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
776; CHECK-NEXT:    ret
777entry:
778  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
779  ret <vscale x 2 x i32> %0
780}
781
782declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
783
784define <vscale x 4 x i32> @test_sf_vc_v_vvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
785; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
786; CHECK:       # %bb.0: # %entry
787; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
788; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
789; CHECK-NEXT:    ret
790entry:
791  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
792  ret <vscale x 4 x i32> %0
793}
794
795declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
796
797define <vscale x 8 x i32> @test_sf_vc_v_vvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
798; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
799; CHECK:       # %bb.0: # %entry
800; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
801; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
802; CHECK-NEXT:    ret
803entry:
804  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
805  ret <vscale x 8 x i32> %0
806}
807
808declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
809
810define <vscale x 16 x i32> @test_sf_vc_v_vvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
811; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
812; CHECK:       # %bb.0: # %entry
813; CHECK-NEXT:    vl8re32.v v24, (a0)
814; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
815; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
816; CHECK-NEXT:    ret
817entry:
818  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
819  ret <vscale x 16 x i32> %0
820}
821
822declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
823
824define <vscale x 1 x i64> @test_sf_vc_v_vvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
825; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
826; CHECK:       # %bb.0: # %entry
827; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
828; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
829; CHECK-NEXT:    ret
830entry:
831  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
832  ret <vscale x 1 x i64> %0
833}
834
835declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
836
837define <vscale x 2 x i64> @test_sf_vc_v_vvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
838; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
839; CHECK:       # %bb.0: # %entry
840; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
841; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
842; CHECK-NEXT:    ret
843entry:
844  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
845  ret <vscale x 2 x i64> %0
846}
847
848declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
849
850define <vscale x 4 x i64> @test_sf_vc_v_vvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
851; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
852; CHECK:       # %bb.0: # %entry
853; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
854; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
855; CHECK-NEXT:    ret
856entry:
857  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
858  ret <vscale x 4 x i64> %0
859}
860
861declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
862
; NOTE(review): in the LMUL=8 variants the CHECK lines show the third vector
; operand being loaded from memory (vl8re64.v v24, (a0)) and %vl arriving in
; a1 instead of a0: %vd and %vs2 occupy the v8 and v16 register groups, and
; %vs1 is evidently passed indirectly by pointer — presumably because no third
; m8 argument register group is available; confirm against the RISC-V vector
; calling convention if this codegen ever changes.
define <vscale x 8 x i64> @test_sf_vc_v_vvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
876
; Section: side-effecting (.se), void sf.vc.xvv intrinsics — the vector-scalar
; form, one test per SEW/LMUL combination below. The scalar %rs1 arrives in a0
; (so %vl shifts to a1 in the vsetvli), and because nothing is returned the
; generated vsetvli uses the tail-agnostic ("ta") policy, matching the void
; .se vvv tests at the top of the file. Note the e32 variants intentionally
; take a SIGN-extended i32 scalar while e8/e16 take zero-extended ones.
define void @test_sf_vc_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
889
890define void @test_sf_vc_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
891; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
892; CHECK:       # %bb.0: # %entry
893; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
894; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
895; CHECK-NEXT:    ret
896entry:
897  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
898  ret void
899}
900
901declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
902
903define void @test_sf_vc_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
904; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
905; CHECK:       # %bb.0: # %entry
906; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
907; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
908; CHECK-NEXT:    ret
909entry:
910  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
911  ret void
912}
913
914declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
915
916define void @test_sf_vc_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
917; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
918; CHECK:       # %bb.0: # %entry
919; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
920; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
921; CHECK-NEXT:    ret
922entry:
923  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
924  ret void
925}
926
927declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
928
929define void @test_sf_vc_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
930; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
931; CHECK:       # %bb.0: # %entry
932; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
933; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
934; CHECK-NEXT:    ret
935entry:
936  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
937  ret void
938}
939
940declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
941
942define void @test_sf_vc_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
943; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
944; CHECK:       # %bb.0: # %entry
945; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
946; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
947; CHECK-NEXT:    ret
948entry:
949  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
950  ret void
951}
952
953declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
954
955define void @test_sf_vc_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
956; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
957; CHECK:       # %bb.0: # %entry
958; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
959; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
960; CHECK-NEXT:    ret
961entry:
962  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
963  ret void
964}
965
966declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
967
968define void @test_sf_vc_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
969; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
970; CHECK:       # %bb.0: # %entry
971; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
972; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
973; CHECK-NEXT:    ret
974entry:
975  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
976  ret void
977}
978
979declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
980
981define void @test_sf_vc_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
982; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
983; CHECK:       # %bb.0: # %entry
984; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
985; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
986; CHECK-NEXT:    ret
987entry:
988  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
989  ret void
990}
991
992declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
993
994define void @test_sf_vc_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
995; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
996; CHECK:       # %bb.0: # %entry
997; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
998; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
999; CHECK-NEXT:    ret
1000entry:
1001  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1002  ret void
1003}
1004
1005declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1006
1007define void @test_sf_vc_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1008; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
1009; CHECK:       # %bb.0: # %entry
1010; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1011; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
1012; CHECK-NEXT:    ret
1013entry:
1014  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1015  ret void
1016}
1017
1018declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1019
1020define void @test_sf_vc_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1021; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
1022; CHECK:       # %bb.0: # %entry
1023; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1024; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
1025; CHECK-NEXT:    ret
1026entry:
1027  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1028  ret void
1029}
1030
1031declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1032
1033define void @test_sf_vc_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1034; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
1035; CHECK:       # %bb.0: # %entry
1036; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1037; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
1038; CHECK-NEXT:    ret
1039entry:
1040  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1041  ret void
1042}
1043
1044declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1045
1046define void @test_sf_vc_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1047; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
1048; CHECK:       # %bb.0: # %entry
1049; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1050; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
1051; CHECK-NEXT:    ret
1052entry:
1053  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1054  ret void
1055}
1056
1057declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1058
1059define void @test_sf_vc_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1060; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
1061; CHECK:       # %bb.0: # %entry
1062; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1063; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
1064; CHECK-NEXT:    ret
1065entry:
1066  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1067  ret void
1068}
1069
1070declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1071
1072define void @test_sf_vc_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1073; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
1074; CHECK:       # %bb.0: # %entry
1075; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1076; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
1077; CHECK-NEXT:    ret
1078entry:
1079  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1080  ret void
1081}
1082
1083declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1084
1085define void @test_sf_vc_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1086; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
1087; CHECK:       # %bb.0: # %entry
1088; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1089; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
1090; CHECK-NEXT:    ret
1091entry:
1092  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1093  ret void
1094}
1095
1096declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1097
1098define void @test_sf_vc_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1099; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
1100; CHECK:       # %bb.0: # %entry
1101; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1102; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
1103; CHECK-NEXT:    ret
1104entry:
1105  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1106  ret void
1107}
1108
1109declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
1110
; Section: side-effecting (.se) AND value-returning sf.vc.v.xvv intrinsics,
; one test per SEW/LMUL combination below. As in the other value-returning
; groups, the returned vd makes the generated vsetvli use the
; tail-undisturbed ("tu") policy; the scalar %rs1 is in a0 and %vl in a1,
; as the CHECK lines show.
define <vscale x 1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
1123
1124define <vscale x 2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1125; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
1126; CHECK:       # %bb.0: # %entry
1127; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
1128; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1129; CHECK-NEXT:    ret
1130entry:
1131  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1132  ret <vscale x 2 x i8> %0
1133}
1134
1135declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
1136
1137define <vscale x 4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1138; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
1139; CHECK:       # %bb.0: # %entry
1140; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
1141; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1142; CHECK-NEXT:    ret
1143entry:
1144  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1145  ret <vscale x 4 x i8> %0
1146}
1147
1148declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
1149
; Tests for the value-returning, side-effecting sf.vc.v.xvv.se intrinsics
; (XSfvcp custom vector ops with a scalar GPR rs1 operand).  The cases below
; differ only in element width (i8/i16/i32) and LMUL; each checks that llc
; selects a vsetvli with the matching SEW/LMUL and tail-undisturbed ("tu")
; policy followed by sf.vc.v.xvv.  The assertion lines are autogenerated by
; update_llc_test_checks.py -- regenerate instead of editing them by hand.
1150define <vscale x 8 x i8> @test_sf_vc_v_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1151; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
1152; CHECK:       # %bb.0: # %entry
1153; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
1154; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1155; CHECK-NEXT:    ret
1156entry:
1157  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1158  ret <vscale x 8 x i8> %0
1159}
1160
1161declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
1162
1163define <vscale x 16 x i8> @test_sf_vc_v_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1164; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
1165; CHECK:       # %bb.0: # %entry
1166; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
1167; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1168; CHECK-NEXT:    ret
1169entry:
1170  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1171  ret <vscale x 16 x i8> %0
1172}
1173
1174declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
1175
1176define <vscale x 32 x i8> @test_sf_vc_v_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1177; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
1178; CHECK:       # %bb.0: # %entry
1179; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
1180; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1181; CHECK-NEXT:    ret
1182entry:
1183  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1184  ret <vscale x 32 x i8> %0
1185}
1186
1187declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
1188
1189define <vscale x 64 x i8> @test_sf_vc_v_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1190; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
1191; CHECK:       # %bb.0: # %entry
1192; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
1193; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1194; CHECK-NEXT:    ret
1195entry:
1196  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1197  ret <vscale x 64 x i8> %0
1198}
1199
1200declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
1201
1202define <vscale x 1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1203; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
1204; CHECK:       # %bb.0: # %entry
1205; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
1206; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1207; CHECK-NEXT:    ret
1208entry:
1209  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1210  ret <vscale x 1 x i16> %0
1211}
1212
1213declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1214
1215define <vscale x 2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1216; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
1217; CHECK:       # %bb.0: # %entry
1218; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
1219; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1220; CHECK-NEXT:    ret
1221entry:
1222  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1223  ret <vscale x 2 x i16> %0
1224}
1225
1226declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1227
1228define <vscale x 4 x i16> @test_sf_vc_v_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1229; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
1230; CHECK:       # %bb.0: # %entry
1231; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
1232; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1233; CHECK-NEXT:    ret
1234entry:
1235  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1236  ret <vscale x 4 x i16> %0
1237}
1238
1239declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1240
1241define <vscale x 8 x i16> @test_sf_vc_v_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1242; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
1243; CHECK:       # %bb.0: # %entry
1244; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
1245; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1246; CHECK-NEXT:    ret
1247entry:
1248  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1249  ret <vscale x 8 x i16> %0
1250}
1251
1252declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1253
1254define <vscale x 16 x i16> @test_sf_vc_v_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1255; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
1256; CHECK:       # %bb.0: # %entry
1257; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
1258; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1259; CHECK-NEXT:    ret
1260entry:
1261  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1262  ret <vscale x 16 x i16> %0
1263}
1264
1265declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1266
1267define <vscale x 32 x i16> @test_sf_vc_v_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1268; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
1269; CHECK:       # %bb.0: # %entry
1270; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
1271; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1272; CHECK-NEXT:    ret
1273entry:
1274  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1275  ret <vscale x 32 x i16> %0
1276}
1277
1278declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1279
1280define <vscale x 1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1281; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
1282; CHECK:       # %bb.0: # %entry
1283; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
1284; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1285; CHECK-NEXT:    ret
1286entry:
1287  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1288  ret <vscale x 1 x i32> %0
1289}
1290
1291declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1292
1293define <vscale x 2 x i32> @test_sf_vc_v_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1294; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
1295; CHECK:       # %bb.0: # %entry
1296; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
1297; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1298; CHECK-NEXT:    ret
1299entry:
1300  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1301  ret <vscale x 2 x i32> %0
1302}
1303
1304declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1305
1306define <vscale x 4 x i32> @test_sf_vc_v_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1307; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
1308; CHECK:       # %bb.0: # %entry
1309; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
1310; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1311; CHECK-NEXT:    ret
1312entry:
1313  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1314  ret <vscale x 4 x i32> %0
1315}
1316
1317declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1318
1319define <vscale x 8 x i32> @test_sf_vc_v_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1320; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
1321; CHECK:       # %bb.0: # %entry
1322; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
1323; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1324; CHECK-NEXT:    ret
1325entry:
1326  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1327  ret <vscale x 8 x i32> %0
1328}
1329
1330declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1331
1332define <vscale x 16 x i32> @test_sf_vc_v_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1333; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
1334; CHECK:       # %bb.0: # %entry
1335; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
1336; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1337; CHECK-NEXT:    ret
1338entry:
1339  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1340  ret <vscale x 16 x i32> %0
1341}
1342
1343declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
1344
; Tests for the value-returning sf.vc.v.xvv intrinsics without the ".se"
; (side-effect) suffix.  Same matrix as the .se cases above -- every integer
; element width (i8/i16/i32) across fractional and whole LMULs -- checking
; the expected vsetvli ("tu, ma") + sf.vc.v.xvv selection.  The assertion
; lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing.
1345define <vscale x 1 x i8> @test_sf_vc_v_xvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1346; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
1347; CHECK:       # %bb.0: # %entry
1348; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
1349; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1350; CHECK-NEXT:    ret
1351entry:
1352  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
1353  ret <vscale x 1 x i8> %0
1354}
1355
1356declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
1357
1358define <vscale x 2 x i8> @test_sf_vc_v_xvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1359; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
1360; CHECK:       # %bb.0: # %entry
1361; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
1362; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1363; CHECK-NEXT:    ret
1364entry:
1365  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1366  ret <vscale x 2 x i8> %0
1367}
1368
1369declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
1370
1371define <vscale x 4 x i8> @test_sf_vc_v_xvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1372; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
1373; CHECK:       # %bb.0: # %entry
1374; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
1375; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1376; CHECK-NEXT:    ret
1377entry:
1378  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1379  ret <vscale x 4 x i8> %0
1380}
1381
1382declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
1383
1384define <vscale x 8 x i8> @test_sf_vc_v_xvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1385; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
1386; CHECK:       # %bb.0: # %entry
1387; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
1388; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1389; CHECK-NEXT:    ret
1390entry:
1391  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1392  ret <vscale x 8 x i8> %0
1393}
1394
1395declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
1396
1397define <vscale x 16 x i8> @test_sf_vc_v_xvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1398; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
1399; CHECK:       # %bb.0: # %entry
1400; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
1401; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1402; CHECK-NEXT:    ret
1403entry:
1404  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1405  ret <vscale x 16 x i8> %0
1406}
1407
1408declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
1409
1410define <vscale x 32 x i8> @test_sf_vc_v_xvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1411; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
1412; CHECK:       # %bb.0: # %entry
1413; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
1414; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1415; CHECK-NEXT:    ret
1416entry:
1417  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1418  ret <vscale x 32 x i8> %0
1419}
1420
1421declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
1422
1423define <vscale x 64 x i8> @test_sf_vc_v_xvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1424; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
1425; CHECK:       # %bb.0: # %entry
1426; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
1427; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1428; CHECK-NEXT:    ret
1429entry:
1430  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1431  ret <vscale x 64 x i8> %0
1432}
1433
1434declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
1435
1436define <vscale x 1 x i16> @test_sf_vc_v_xvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1437; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
1438; CHECK:       # %bb.0: # %entry
1439; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
1440; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1441; CHECK-NEXT:    ret
1442entry:
1443  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1444  ret <vscale x 1 x i16> %0
1445}
1446
1447declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1448
1449define <vscale x 2 x i16> @test_sf_vc_v_xvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1450; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
1451; CHECK:       # %bb.0: # %entry
1452; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
1453; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1454; CHECK-NEXT:    ret
1455entry:
1456  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1457  ret <vscale x 2 x i16> %0
1458}
1459
1460declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1461
1462define <vscale x 4 x i16> @test_sf_vc_v_xvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1463; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
1464; CHECK:       # %bb.0: # %entry
1465; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
1466; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1467; CHECK-NEXT:    ret
1468entry:
1469  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1470  ret <vscale x 4 x i16> %0
1471}
1472
1473declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1474
1475define <vscale x 8 x i16> @test_sf_vc_v_xvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1476; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
1477; CHECK:       # %bb.0: # %entry
1478; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
1479; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1480; CHECK-NEXT:    ret
1481entry:
1482  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1483  ret <vscale x 8 x i16> %0
1484}
1485
1486declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1487
1488define <vscale x 16 x i16> @test_sf_vc_v_xvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1489; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
1490; CHECK:       # %bb.0: # %entry
1491; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
1492; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1493; CHECK-NEXT:    ret
1494entry:
1495  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1496  ret <vscale x 16 x i16> %0
1497}
1498
1499declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1500
1501define <vscale x 32 x i16> @test_sf_vc_v_xvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1502; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
1503; CHECK:       # %bb.0: # %entry
1504; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
1505; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1506; CHECK-NEXT:    ret
1507entry:
1508  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1509  ret <vscale x 32 x i16> %0
1510}
1511
1512declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1513
1514define <vscale x 1 x i32> @test_sf_vc_v_xvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1515; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
1516; CHECK:       # %bb.0: # %entry
1517; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
1518; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1519; CHECK-NEXT:    ret
1520entry:
1521  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1522  ret <vscale x 1 x i32> %0
1523}
1524
1525declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1526
1527define <vscale x 2 x i32> @test_sf_vc_v_xvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1528; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
1529; CHECK:       # %bb.0: # %entry
1530; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
1531; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
1532; CHECK-NEXT:    ret
1533entry:
1534  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1535  ret <vscale x 2 x i32> %0
1536}
1537
1538declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1539
1540define <vscale x 4 x i32> @test_sf_vc_v_xvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1541; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
1542; CHECK:       # %bb.0: # %entry
1543; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
1544; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
1545; CHECK-NEXT:    ret
1546entry:
1547  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1548  ret <vscale x 4 x i32> %0
1549}
1550
1551declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1552
1553define <vscale x 8 x i32> @test_sf_vc_v_xvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1554; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
1555; CHECK:       # %bb.0: # %entry
1556; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
1557; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
1558; CHECK-NEXT:    ret
1559entry:
1560  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1561  ret <vscale x 8 x i32> %0
1562}
1563
1564declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1565
1566define <vscale x 16 x i32> @test_sf_vc_v_xvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1567; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
1568; CHECK:       # %bb.0: # %entry
1569; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
1570; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
1571; CHECK-NEXT:    ret
1572entry:
1573  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1574  ret <vscale x 16 x i32> %0
1575}
1576
1577declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
1578
; Tests for the void, side-effecting sf.vc.ivv.se intrinsics (5-bit immediate
; rs1 operand, no vector result).  Because nothing is written back, the
; selected vsetvli uses the tail-agnostic ("ta") policy, unlike the
; value-returning tests above.  Covers e8 through e64 across all LMULs; the
; assertion lines are autogenerated by update_llc_test_checks.py, so
; regenerate rather than hand-editing.
1579define void @test_sf_vc_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1580; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
1581; CHECK:       # %bb.0: # %entry
1582; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
1583; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1584; CHECK-NEXT:    ret
1585entry:
1586  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1587  ret void
1588}
1589
1590declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
1591
1592define void @test_sf_vc_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1593; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
1594; CHECK:       # %bb.0: # %entry
1595; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
1596; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1597; CHECK-NEXT:    ret
1598entry:
1599  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1600  ret void
1601}
1602
1603declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
1604
1605define void @test_sf_vc_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1606; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
1607; CHECK:       # %bb.0: # %entry
1608; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
1609; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1610; CHECK-NEXT:    ret
1611entry:
1612  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1613  ret void
1614}
1615
1616declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
1617
1618define void @test_sf_vc_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1619; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
1620; CHECK:       # %bb.0: # %entry
1621; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
1622; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1623; CHECK-NEXT:    ret
1624entry:
1625  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1626  ret void
1627}
1628
1629declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
1630
1631define void @test_sf_vc_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1632; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
1633; CHECK:       # %bb.0: # %entry
1634; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
1635; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
1636; CHECK-NEXT:    ret
1637entry:
1638  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1639  ret void
1640}
1641
1642declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
1643
1644define void @test_sf_vc_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1645; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
1646; CHECK:       # %bb.0: # %entry
1647; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
1648; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
1649; CHECK-NEXT:    ret
1650entry:
1651  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1652  ret void
1653}
1654
1655declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
1656
1657define void @test_sf_vc_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
1658; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
1659; CHECK:       # %bb.0: # %entry
1660; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
1661; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
1662; CHECK-NEXT:    ret
1663entry:
1664  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1665  ret void
1666}
1667
1668declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
1669
1670define void @test_sf_vc_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1671; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
1672; CHECK:       # %bb.0: # %entry
1673; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
1674; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1675; CHECK-NEXT:    ret
1676entry:
1677  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1678  ret void
1679}
1680
1681declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
1682
1683define void @test_sf_vc_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1684; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
1685; CHECK:       # %bb.0: # %entry
1686; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
1687; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1688; CHECK-NEXT:    ret
1689entry:
1690  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1691  ret void
1692}
1693
1694declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
1695
1696define void @test_sf_vc_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1697; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
1698; CHECK:       # %bb.0: # %entry
1699; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
1700; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1701; CHECK-NEXT:    ret
1702entry:
1703  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1704  ret void
1705}
1706
1707declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
1708
1709define void @test_sf_vc_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1710; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
1711; CHECK:       # %bb.0: # %entry
1712; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
1713; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
1714; CHECK-NEXT:    ret
1715entry:
1716  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1717  ret void
1718}
1719
1720declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
1721
1722define void @test_sf_vc_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1723; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
1724; CHECK:       # %bb.0: # %entry
1725; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
1726; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
1727; CHECK-NEXT:    ret
1728entry:
1729  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1730  ret void
1731}
1732
1733declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
1734
1735define void @test_sf_vc_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
1736; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
1737; CHECK:       # %bb.0: # %entry
1738; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
1739; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
1740; CHECK-NEXT:    ret
1741entry:
1742  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
1743  ret void
1744}
1745
1746declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
1747
1748define void @test_sf_vc_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1749; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
1750; CHECK:       # %bb.0: # %entry
1751; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
1752; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1753; CHECK-NEXT:    ret
1754entry:
1755  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1756  ret void
1757}
1758
1759declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
1760
1761define void @test_sf_vc_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1762; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
1763; CHECK:       # %bb.0: # %entry
1764; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
1765; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1766; CHECK-NEXT:    ret
1767entry:
1768  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1769  ret void
1770}
1771
1772declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
1773
1774define void @test_sf_vc_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1775; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
1776; CHECK:       # %bb.0: # %entry
1777; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
1778; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
1779; CHECK-NEXT:    ret
1780entry:
1781  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1782  ret void
1783}
1784
1785declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
1786
1787define void @test_sf_vc_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1788; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
1789; CHECK:       # %bb.0: # %entry
1790; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
1791; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
1792; CHECK-NEXT:    ret
1793entry:
1794  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1795  ret void
1796}
1797
1798declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
1799
1800define void @test_sf_vc_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
1801; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
1802; CHECK:       # %bb.0: # %entry
1803; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
1804; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
1805; CHECK-NEXT:    ret
1806entry:
1807  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
1808  ret void
1809}
1810
1811declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
1812
1813define void @test_sf_vc_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
1814; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
1815; CHECK:       # %bb.0: # %entry
1816; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
1817; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 10
1818; CHECK-NEXT:    ret
1819entry:
1820  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
1821  ret void
1822}
1823
1824declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
1825
1826define void @test_sf_vc_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
1827; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
1828; CHECK:       # %bb.0: # %entry
1829; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
1830; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 10
1831; CHECK-NEXT:    ret
1832entry:
1833  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
1834  ret void
1835}
1836
1837declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
1838
1839define void @test_sf_vc_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
1840; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
1841; CHECK:       # %bb.0: # %entry
1842; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
1843; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 10
1844; CHECK-NEXT:    ret
1845entry:
1846  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
1847  ret void
1848}
1849
1850declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
1851
1852define void @test_sf_vc_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
1853; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
1854; CHECK:       # %bb.0: # %entry
1855; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1856; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 10
1857; CHECK-NEXT:    ret
1858entry:
1859  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
1860  ret void
1861}
1862
1863declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
1864
; sf.vc.v.ivv ".se" variant: same opcode-vd-vs2-simm encoding, but the
; intrinsic returns the updated vd value. Unlike the void form above (which
; uses "ta"), these tests expect the tail-undisturbed ("tu") policy in the
; vsetvli, since the incoming %vd is tied to the instruction's destination.
; The ".se" suffix presumably marks the variant modeled as having side
; effects — NOTE(review): convention not provable from this chunk; confirm
; against IntrinsicsRISCVXsf.td.

define <vscale x 1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)

define <vscale x 1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
2150
; sf.vc.v.ivv without the ".se" suffix: same operands and the same expected
; machine code ("tu" vsetvli, v8 destination tied to %vd) as the ".se"
; value-returning tests above; only the intrinsic name differs. These tests
; verify the non-".se" intrinsic lowers identically.

define <vscale x 1 x i8> @test_sf_vc_v_ivv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_ivv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_ivv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_ivv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_ivv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_ivv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_ivv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)

define <vscale x 1 x i16> @test_sf_vc_v_ivv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_ivv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_ivv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_ivv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_ivv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_ivv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_ivv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_ivv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_ivv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_ivv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_ivv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_ivv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_ivv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_ivv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_ivv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
2436
; XSfvcp sf.vc.vvv / sf.vc.v.vvv tests where the passthru/destination %vd is a
; floating-point vector while %vs2/%vs1 stay integer. The ".se" (side-effect)
; void form keeps "ta" tail policy; the value-returning sf.vc.v.vvv form uses
; "tu" because %vd is a merge operand. m8 cases spill %vs1 to the stack at the
; ABI boundary, so it is reloaded with vl8reN.v before the custom instruction.
; --- f16 element type, mf4 through m8 ---
2437define void @test_sf_vc_fvvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
2438; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4:
2439; CHECK:       # %bb.0: # %entry
2440; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
2441; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2442; CHECK-NEXT:    ret
2443entry:
2444  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
2445  ret void
2446}
2447
2448declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
2449
2450define <vscale x 1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
2451; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4:
2452; CHECK:       # %bb.0: # %entry
2453; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2454; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2455; CHECK-NEXT:    ret
2456entry:
2457  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
2458  ret <vscale x 1 x half> %0
2459}
2460
2461declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
2462
2463define void @test_sf_vc_fvvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
2464; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2:
2465; CHECK:       # %bb.0: # %entry
2466; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
2467; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2468; CHECK-NEXT:    ret
2469entry:
2470  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
2471  ret void
2472}
2473
2474declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
2475
2476define <vscale x 2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
2477; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2:
2478; CHECK:       # %bb.0: # %entry
2479; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
2480; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2481; CHECK-NEXT:    ret
2482entry:
2483  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
2484  ret <vscale x 2 x half> %0
2485}
2486
2487declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
2488
2489define void @test_sf_vc_fvvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
2490; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1:
2491; CHECK:       # %bb.0: # %entry
2492; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
2493; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2494; CHECK-NEXT:    ret
2495entry:
2496  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
2497  ret void
2498}
2499
2500declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
2501
2502define <vscale x 4 x half> @test_sf_vc_fv_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
2503; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1:
2504; CHECK:       # %bb.0: # %entry
2505; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
2506; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2507; CHECK-NEXT:    ret
2508entry:
2509  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
2510  ret <vscale x 4 x half> %0
2511}
2512
2513declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
2514
2515define void @test_sf_vc_fvvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
2516; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2:
2517; CHECK:       # %bb.0: # %entry
2518; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
2519; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
2520; CHECK-NEXT:    ret
2521entry:
2522  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
2523  ret void
2524}
2525
2526declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
2527
2528define <vscale x 8 x half> @test_sf_vc_fv_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
2529; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2:
2530; CHECK:       # %bb.0: # %entry
2531; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
2532; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
2533; CHECK-NEXT:    ret
2534entry:
2535  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
2536  ret <vscale x 8 x half> %0
2537}
2538
2539declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
2540
2541define void @test_sf_vc_fvvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
2542; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4:
2543; CHECK:       # %bb.0: # %entry
2544; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
2545; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
2546; CHECK-NEXT:    ret
2547entry:
2548  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
2549  ret void
2550}
2551
2552declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
2553
2554define <vscale x 16 x half> @test_sf_vc_fv_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
2555; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4:
2556; CHECK:       # %bb.0: # %entry
2557; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
2558; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
2559; CHECK-NEXT:    ret
2560entry:
2561  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
2562  ret <vscale x 16 x half> %0
2563}
2564
2565declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
2566
2567define void @test_sf_vc_fvvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
2568; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8:
2569; CHECK:       # %bb.0: # %entry
2570; CHECK-NEXT:    vl8re16.v v24, (a0)
2571; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
2572; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
2573; CHECK-NEXT:    ret
2574entry:
2575  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
2576  ret void
2577}
2578
2579declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
2580
2581define <vscale x 32 x half> @test_sf_vc_fv_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
2582; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8:
2583; CHECK:       # %bb.0: # %entry
2584; CHECK-NEXT:    vl8re16.v v24, (a0)
2585; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
2586; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
2587; CHECK-NEXT:    ret
2588entry:
2589  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
2590  ret <vscale x 32 x half> %0
2591}
2592
2593declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
2594
; --- f32 element type, mf2 through m8 ---
2595define void @test_sf_vc_fvvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
2596; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2:
2597; CHECK:       # %bb.0: # %entry
2598; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
2599; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2600; CHECK-NEXT:    ret
2601entry:
2602  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
2603  ret void
2604}
2605
2606declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
2607
2608define <vscale x 1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
2609; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2:
2610; CHECK:       # %bb.0: # %entry
2611; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
2612; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2613; CHECK-NEXT:    ret
2614entry:
2615  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
2616  ret <vscale x 1 x float> %0
2617}
2618
2619declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
2620
2621define void @test_sf_vc_fvvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
2622; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1:
2623; CHECK:       # %bb.0: # %entry
2624; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
2625; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2626; CHECK-NEXT:    ret
2627entry:
2628  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
2629  ret void
2630}
2631
2632declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
2633
2634define <vscale x 2 x float> @test_sf_vc_fv_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
2635; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1:
2636; CHECK:       # %bb.0: # %entry
2637; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
2638; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2639; CHECK-NEXT:    ret
2640entry:
2641  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
2642  ret <vscale x 2 x float> %0
2643}
2644
2645declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
2646
2647define void @test_sf_vc_fvvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
2648; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2:
2649; CHECK:       # %bb.0: # %entry
2650; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
2651; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
2652; CHECK-NEXT:    ret
2653entry:
2654  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
2655  ret void
2656}
2657
2658declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
2659
2660define <vscale x 4 x float> @test_sf_vc_fv_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
2661; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2:
2662; CHECK:       # %bb.0: # %entry
2663; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
2664; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
2665; CHECK-NEXT:    ret
2666entry:
2667  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
2668  ret <vscale x 4 x float> %0
2669}
2670
2671declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
2672
2673define void @test_sf_vc_fvvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
2674; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4:
2675; CHECK:       # %bb.0: # %entry
2676; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
2677; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
2678; CHECK-NEXT:    ret
2679entry:
2680  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
2681  ret void
2682}
2683
2684declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
2685
2686define <vscale x 8 x float> @test_sf_vc_fv_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
2687; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4:
2688; CHECK:       # %bb.0: # %entry
2689; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
2690; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
2691; CHECK-NEXT:    ret
2692entry:
2693  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
2694  ret <vscale x 8 x float> %0
2695}
2696
2697declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
2698
2699define void @test_sf_vc_fvvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
2700; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8:
2701; CHECK:       # %bb.0: # %entry
2702; CHECK-NEXT:    vl8re32.v v24, (a0)
2703; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
2704; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
2705; CHECK-NEXT:    ret
2706entry:
2707  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
2708  ret void
2709}
2710
2711declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
2712
2713define <vscale x 16 x float> @test_sf_vc_fv_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
2714; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8:
2715; CHECK:       # %bb.0: # %entry
2716; CHECK-NEXT:    vl8re32.v v24, (a0)
2717; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
2718; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
2719; CHECK-NEXT:    ret
2720entry:
2721  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
2722  ret <vscale x 16 x float> %0
2723}
2724
2725declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
2726
; --- f64 element type, m1 through m8 ---
2727define void @test_sf_vc_fvvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
2728; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1:
2729; CHECK:       # %bb.0: # %entry
2730; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
2731; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
2732; CHECK-NEXT:    ret
2733entry:
2734  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
2735  ret void
2736}
2737
2738declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
2739
2740define <vscale x 1 x double> @test_sf_vc_fv_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
2741; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1:
2742; CHECK:       # %bb.0: # %entry
2743; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
2744; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
2745; CHECK-NEXT:    ret
2746entry:
2747  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
2748  ret <vscale x 1 x double> %0
2749}
2750
2751declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
2752
2753define void @test_sf_vc_fvvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
2754; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2:
2755; CHECK:       # %bb.0: # %entry
2756; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
2757; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
2758; CHECK-NEXT:    ret
2759entry:
2760  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
2761  ret void
2762}
2763
2764declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
2765
2766define <vscale x 2 x double> @test_sf_vc_fv_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
2767; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2:
2768; CHECK:       # %bb.0: # %entry
2769; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
2770; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
2771; CHECK-NEXT:    ret
2772entry:
2773  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
2774  ret <vscale x 2 x double> %0
2775}
2776
2777declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
2778
2779define void @test_sf_vc_fvvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
2780; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4:
2781; CHECK:       # %bb.0: # %entry
2782; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
2783; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
2784; CHECK-NEXT:    ret
2785entry:
2786  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
2787  ret void
2788}
2789
2790declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
2791
2792define <vscale x 4 x double> @test_sf_vc_fv_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
2793; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4:
2794; CHECK:       # %bb.0: # %entry
2795; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
2796; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
2797; CHECK-NEXT:    ret
2798entry:
2799  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
2800  ret <vscale x 4 x double> %0
2801}
2802
2803declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
2804
2805define void @test_sf_vc_fvvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
2806; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8:
2807; CHECK:       # %bb.0: # %entry
2808; CHECK-NEXT:    vl8re64.v v24, (a0)
2809; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
2810; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
2811; CHECK-NEXT:    ret
2812entry:
2813  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
2814  ret void
2815}
2816
2817declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
2818
2819define <vscale x 8 x double> @test_sf_vc_fv_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
2820; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8:
2821; CHECK:       # %bb.0: # %entry
2822; CHECK-NEXT:    vl8re64.v v24, (a0)
2823; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
2824; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
2825; CHECK-NEXT:    ret
2826entry:
2827  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
2828  ret <vscale x 8 x double> %0
2829}
2830
2831declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
2832
; XSfvcp sf.vc.xvv / sf.vc.v.xvv tests with floating-point %vd and a scalar
; GPR rs1 operand. The scalar arrives in a0, pushing %vl into a1. The void
; ".se" form uses "ta"; the value-returning form uses "tu" (%vd is a merge
; operand). Covers f16 mf4 through m8, then f32 starting at mf2.
2833define void @test_sf_vc_fvvx_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
2834; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4:
2835; CHECK:       # %bb.0: # %entry
2836; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2837; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
2838; CHECK-NEXT:    ret
2839entry:
2840  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
2841  ret void
2842}
2843
2844declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, i16, iXLen)
2845
2846define <vscale x 1 x half> @test_sf_vc_v_fvvx_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
2847; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4:
2848; CHECK:       # %bb.0: # %entry
2849; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
2850; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
2851; CHECK-NEXT:    ret
2852entry:
2853  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
2854  ret <vscale x 1 x half> %0
2855}
2856
2857declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, i16, iXLen)
2858
2859define void @test_sf_vc_fvvx_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
2860; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2:
2861; CHECK:       # %bb.0: # %entry
2862; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2863; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
2864; CHECK-NEXT:    ret
2865entry:
2866  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
2867  ret void
2868}
2869
2870declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, i16, iXLen)
2871
2872define <vscale x 2 x half> @test_sf_vc_v_fvvx_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
2873; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2:
2874; CHECK:       # %bb.0: # %entry
2875; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
2876; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
2877; CHECK-NEXT:    ret
2878entry:
2879  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
2880  ret <vscale x 2 x half> %0
2881}
2882
2883declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, i16, iXLen)
2884
2885define void @test_sf_vc_fvvx_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
2886; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1:
2887; CHECK:       # %bb.0: # %entry
2888; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2889; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
2890; CHECK-NEXT:    ret
2891entry:
2892  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
2893  ret void
2894}
2895
2896declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, i16, iXLen)
2897
2898define <vscale x 4 x half> @test_sf_vc_v_fvvx_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
2899; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1:
2900; CHECK:       # %bb.0: # %entry
2901; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
2902; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
2903; CHECK-NEXT:    ret
2904entry:
2905  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
2906  ret <vscale x 4 x half> %0
2907}
2908
2909declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, i16, iXLen)
2910
2911define void @test_sf_vc_fvvx_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
2912; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2:
2913; CHECK:       # %bb.0: # %entry
2914; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2915; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
2916; CHECK-NEXT:    ret
2917entry:
2918  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
2919  ret void
2920}
2921
2922declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, i16, iXLen)
2923
2924define <vscale x 8 x half> @test_sf_vc_v_fvvx_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
2925; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2:
2926; CHECK:       # %bb.0: # %entry
2927; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
2928; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
2929; CHECK-NEXT:    ret
2930entry:
2931  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
2932  ret <vscale x 8 x half> %0
2933}
2934
2935declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, i16, iXLen)
2936
2937define void @test_sf_vc_fvvx_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
2938; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4:
2939; CHECK:       # %bb.0: # %entry
2940; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2941; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
2942; CHECK-NEXT:    ret
2943entry:
2944  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
2945  ret void
2946}
2947
2948declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, i16, iXLen)
2949
2950define <vscale x 16 x half> @test_sf_vc_v_fvvx_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
2951; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4:
2952; CHECK:       # %bb.0: # %entry
2953; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
2954; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
2955; CHECK-NEXT:    ret
2956entry:
2957  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
2958  ret <vscale x 16 x half> %0
2959}
2960
2961declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, i16, iXLen)
2962
2963define void @test_sf_vc_fvvx_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl) {
2964; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8:
2965; CHECK:       # %bb.0: # %entry
2966; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
2967; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
2968; CHECK-NEXT:    ret
2969entry:
2970  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
2971  ret void
2972}
2973
2974declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, i16, iXLen)
2975
2976define <vscale x 32 x half> @test_sf_vc_v_fvvx_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl) {
2977; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8:
2978; CHECK:       # %bb.0: # %entry
2979; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
2980; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
2981; CHECK-NEXT:    ret
2982entry:
2983  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
2984  ret <vscale x 32 x half> %0
2985}
2986
2987declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, i16, iXLen)
2988
2989define void @test_sf_vc_fvvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
2990; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2:
2991; CHECK:       # %bb.0: # %entry
2992; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2993; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
2994; CHECK-NEXT:    ret
2995entry:
2996  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
2997  ret void
2998}
2999
3000declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, i32, iXLen)
3001
3002define <vscale x 1 x float> @test_sf_vc_v_fvvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
3003; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2:
3004; CHECK:       # %bb.0: # %entry
3005; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
3006; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
3007; CHECK-NEXT:    ret
3008entry:
3009  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
3010  ret <vscale x 1 x float> %0
3011}
3012
3013declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, i32, iXLen)
3014
3015define void @test_sf_vc_fvvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
3016; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1:
3017; CHECK:       # %bb.0: # %entry
3018; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
3019; CHECK-NEXT:    sf.vc.xvv 3, v8, v9, a0
3020; CHECK-NEXT:    ret
3021entry:
3022  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
3023  ret void
3024}
3025
3026declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, i32, iXLen)
3027
3028define <vscale x 2 x float> @test_sf_vc_v_fvvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
3029; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1:
3030; CHECK:       # %bb.0: # %entry
3031; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
3032; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v9, a0
3033; CHECK-NEXT:    ret
3034entry:
3035  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
3036  ret <vscale x 2 x float> %0
3037}
3038
3039declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, i32, iXLen)
3040
3041define void @test_sf_vc_fvvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
3042; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2:
3043; CHECK:       # %bb.0: # %entry
3044; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
3045; CHECK-NEXT:    sf.vc.xvv 3, v8, v10, a0
3046; CHECK-NEXT:    ret
3047entry:
3048  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
3049  ret void
3050}
3051
3052declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, i32, iXLen)
3053
3054define <vscale x 4 x float> @test_sf_vc_v_fvvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
3055; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2:
3056; CHECK:       # %bb.0: # %entry
3057; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
3058; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v10, a0
3059; CHECK-NEXT:    ret
3060entry:
3061  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
3062  ret <vscale x 4 x float> %0
3063}
3064
3065declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, i32, iXLen)
3066
3067define void @test_sf_vc_fvvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
3068; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4:
3069; CHECK:       # %bb.0: # %entry
3070; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
3071; CHECK-NEXT:    sf.vc.xvv 3, v8, v12, a0
3072; CHECK-NEXT:    ret
3073entry:
3074  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
3075  ret void
3076}
3077
3078declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, i32, iXLen)
3079
3080define <vscale x 8 x float> @test_sf_vc_v_fvvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
3081; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4:
3082; CHECK:       # %bb.0: # %entry
3083; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
3084; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v12, a0
3085; CHECK-NEXT:    ret
3086entry:
3087  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
3088  ret <vscale x 8 x float> %0
3089}
3090
3091declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, i32, iXLen)
3092
3093define void @test_sf_vc_fvvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl) {
3094; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8:
3095; CHECK:       # %bb.0: # %entry
3096; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
3097; CHECK-NEXT:    sf.vc.xvv 3, v8, v16, a0
3098; CHECK-NEXT:    ret
3099entry:
3100  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
3101  ret void
3102}
3103
3104declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, i32, iXLen)
3105
3106define <vscale x 16 x float> @test_sf_vc_v_fvvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl) {
3107; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8:
3108; CHECK:       # %bb.0: # %entry
3109; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
3110; CHECK-NEXT:    sf.vc.v.xvv 3, v8, v16, a0
3111; CHECK-NEXT:    ret
3112entry:
3113  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
3114  ret <vscale x 16 x float> %0
3115}
3116
3117declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, i32, iXLen)
3118
; sf.vc.ivv (simm operand, side-effect-only form) at SEW=16/LMUL=mf4.
; The void intrinsic has no destination to preserve, so the generated
; vsetvli uses the tail-agnostic (ta) policy.
define void @test_sf_vc_fvvi_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, iXLen, iXLen)
3131
; sf.vc.v.ivv (simm operand, value-returning form) at SEW=16/LMUL=mf4.
; %vd is both merge operand and result, so the generated vsetvli uses the
; tail-undisturbed (tu) policy, unlike the void variant above it.
define <vscale x 1 x half> @test_sf_vc_fv_fvvi_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 1 x half> %0
}

declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, iXLen, iXLen)
3144
3145define void @test_sf_vc_fvvi_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
3146; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2:
3147; CHECK:       # %bb.0: # %entry
3148; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
3149; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 3
3150; CHECK-NEXT:    ret
3151entry:
3152  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
3153  ret void
3154}
3155
3156declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, iXLen, iXLen)
3157
3158define <vscale x 2 x half> @test_sf_vc_fv_fvvi_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
3159; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2:
3160; CHECK:       # %bb.0: # %entry
3161; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
3162; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 3
3163; CHECK-NEXT:    ret
3164entry:
3165  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
3166  ret <vscale x 2 x half> %0
3167}
3168
3169declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, iXLen, iXLen)
3170
3171define void @test_sf_vc_fvvi_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
3172; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1:
3173; CHECK:       # %bb.0: # %entry
3174; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
3175; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 3
3176; CHECK-NEXT:    ret
3177entry:
3178  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
3179  ret void
3180}
3181
3182declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, iXLen, iXLen)
3183
3184define <vscale x 4 x half> @test_sf_vc_fv_fvvi_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
3185; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1:
3186; CHECK:       # %bb.0: # %entry
3187; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
3188; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 3
3189; CHECK-NEXT:    ret
3190entry:
3191  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
3192  ret <vscale x 4 x half> %0
3193}
3194
3195declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, iXLen, iXLen)
3196
3197define void @test_sf_vc_fvvi_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
3198; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2:
3199; CHECK:       # %bb.0: # %entry
3200; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
3201; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 3
3202; CHECK-NEXT:    ret
3203entry:
3204  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
3205  ret void
3206}
3207
3208declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, iXLen, iXLen)
3209
3210define <vscale x 8 x half> @test_sf_vc_fv_fvvi_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
3211; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2:
3212; CHECK:       # %bb.0: # %entry
3213; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
3214; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 3
3215; CHECK-NEXT:    ret
3216entry:
3217  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
3218  ret <vscale x 8 x half> %0
3219}
3220
3221declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, iXLen, iXLen)
3222
3223define void @test_sf_vc_fvvi_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
3224; CHECK-LABEL: test_sf_vc_fvvi_se_e16m4:
3225; CHECK:       # %bb.0: # %entry
3226; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
3227; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 3
3228; CHECK-NEXT:    ret
3229entry:
3230  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
3231  ret void
3232}
3233
3234declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, iXLen, iXLen)
3235
3236define <vscale x 16 x half> @test_sf_vc_fv_fvvi_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
3237; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4:
3238; CHECK:       # %bb.0: # %entry
3239; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
3240; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 3
3241; CHECK-NEXT:    ret
3242entry:
3243  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
3244  ret <vscale x 16 x half> %0
3245}
3246
3247declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, iXLen, iXLen)
3248
3249define void @test_sf_vc_fvvi_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
3250; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8:
3251; CHECK:       # %bb.0: # %entry
3252; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
3253; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 3
3254; CHECK-NEXT:    ret
3255entry:
3256  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen 3, iXLen %vl)
3257  ret void
3258}
3259
3260declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, iXLen, iXLen)
3261
3262define <vscale x 32 x half> @test_sf_vc_fv_fvvi_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
3263; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8:
3264; CHECK:       # %bb.0: # %entry
3265; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
3266; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 3
3267; CHECK-NEXT:    ret
3268entry:
3269  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen 3, iXLen %vl)
3270  ret <vscale x 32 x half> %0
3271}
3272
3273declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, iXLen, iXLen)
3274
3275define void @test_sf_vc_fvvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
3276; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2:
3277; CHECK:       # %bb.0: # %entry
3278; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
3279; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 3
3280; CHECK-NEXT:    ret
3281entry:
3282  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
3283  ret void
3284}
3285
3286declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, iXLen, iXLen)
3287
3288define <vscale x 1 x float> @test_sf_vc_fv_fvvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
3289; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2:
3290; CHECK:       # %bb.0: # %entry
3291; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
3292; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 3
3293; CHECK-NEXT:    ret
3294entry:
3295  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
3296  ret <vscale x 1 x float> %0
3297}
3298
3299declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, iXLen, iXLen)
3300
3301define void @test_sf_vc_fvvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
3302; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1:
3303; CHECK:       # %bb.0: # %entry
3304; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
3305; CHECK-NEXT:    sf.vc.ivv 3, v8, v9, 3
3306; CHECK-NEXT:    ret
3307entry:
3308  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
3309  ret void
3310}
3311
3312declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, iXLen, iXLen)
3313
3314define <vscale x 2 x float> @test_sf_vc_fv_fvvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
3315; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1:
3316; CHECK:       # %bb.0: # %entry
3317; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
3318; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v9, 3
3319; CHECK-NEXT:    ret
3320entry:
3321  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
3322  ret <vscale x 2 x float> %0
3323}
3324
3325declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, iXLen, iXLen)
3326
3327define void @test_sf_vc_fvvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
3328; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2:
3329; CHECK:       # %bb.0: # %entry
3330; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
3331; CHECK-NEXT:    sf.vc.ivv 3, v8, v10, 3
3332; CHECK-NEXT:    ret
3333entry:
3334  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
3335  ret void
3336}
3337
3338declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, iXLen, iXLen)
3339
3340define <vscale x 4 x float> @test_sf_vc_fv_fvvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
3341; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2:
3342; CHECK:       # %bb.0: # %entry
3343; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
3344; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v10, 3
3345; CHECK-NEXT:    ret
3346entry:
3347  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
3348  ret <vscale x 4 x float> %0
3349}
3350
3351declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, iXLen, iXLen)
3352
3353define void @test_sf_vc_fvvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
3354; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4:
3355; CHECK:       # %bb.0: # %entry
3356; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
3357; CHECK-NEXT:    sf.vc.ivv 3, v8, v12, 3
3358; CHECK-NEXT:    ret
3359entry:
3360  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
3361  ret void
3362}
3363
3364declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, iXLen, iXLen)
3365
3366define <vscale x 8 x float> @test_sf_vc_fv_fvvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
3367; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4:
3368; CHECK:       # %bb.0: # %entry
3369; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
3370; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v12, 3
3371; CHECK-NEXT:    ret
3372entry:
3373  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
3374  ret <vscale x 8 x float> %0
3375}
3376
3377declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, iXLen, iXLen)
3378
3379define void @test_sf_vc_fvvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
3380; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8:
3381; CHECK:       # %bb.0: # %entry
3382; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
3383; CHECK-NEXT:    sf.vc.ivv 3, v8, v16, 3
3384; CHECK-NEXT:    ret
3385entry:
3386  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen 3, iXLen %vl)
3387  ret void
3388}
3389
3390declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, iXLen, iXLen)
3391
3392define <vscale x 16 x float> @test_sf_vc_fv_fvvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
3393; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8:
3394; CHECK:       # %bb.0: # %entry
3395; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
3396; CHECK-NEXT:    sf.vc.v.ivv 3, v8, v16, 3
3397; CHECK-NEXT:    ret
3398entry:
3399  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen 3, iXLen %vl)
3400  ret <vscale x 16 x float> %0
3401}
3402
3403declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, iXLen, iXLen)
3404
; sf.vc.fvv (scalar FP operand in fa0, side-effect-only form) at SEW=16/LMUL=mf4.
; Note the opcode immediate is 1 here (fvv forms), not 3 as in the ivv/xvv tests.
define void @test_sf_vc_fvvf_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, half, iXLen)
3417
3418define <vscale x 1 x half> @test_sf_vc_fv_fvvf_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
3419; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4:
3420; CHECK:       # %bb.0: # %entry
3421; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
3422; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
3423; CHECK-NEXT:    ret
3424entry:
3425  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
3426  ret <vscale x 1 x half> %0
3427}
3428
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, half, iXLen)
3430
3431define void @test_sf_vc_fvvf_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
3432; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2:
3433; CHECK:       # %bb.0: # %entry
3434; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
3435; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
3436; CHECK-NEXT:    ret
3437entry:
3438  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
3439  ret void
3440}
3441
3442declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, half, iXLen)
3443
3444define <vscale x 2 x half> @test_sf_vc_fv_fvvf_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
3445; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2:
3446; CHECK:       # %bb.0: # %entry
3447; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
3448; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
3449; CHECK-NEXT:    ret
3450entry:
3451  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
3452  ret <vscale x 2 x half> %0
3453}
3454
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, half, iXLen)
3456
3457define void @test_sf_vc_fvvf_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
3458; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1:
3459; CHECK:       # %bb.0: # %entry
3460; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
3461; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
3462; CHECK-NEXT:    ret
3463entry:
3464  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
3465  ret void
3466}
3467
3468declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, half, iXLen)
3469
3470define <vscale x 4 x half> @test_sf_vc_fv_fvvf_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
3471; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1:
3472; CHECK:       # %bb.0: # %entry
3473; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
3474; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
3475; CHECK-NEXT:    ret
3476entry:
3477  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
3478  ret <vscale x 4 x half> %0
3479}
3480
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, half, iXLen)
3482
3483define void @test_sf_vc_fvvf_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
3484; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2:
3485; CHECK:       # %bb.0: # %entry
3486; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
3487; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
3488; CHECK-NEXT:    ret
3489entry:
3490  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
3491  ret void
3492}
3493
3494declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, half, iXLen)
3495
3496define <vscale x 8 x half> @test_sf_vc_fv_fvvf_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
3497; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2:
3498; CHECK:       # %bb.0: # %entry
3499; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
3500; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
3501; CHECK-NEXT:    ret
3502entry:
3503  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
3504  ret <vscale x 8 x half> %0
3505}
3506
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, half, iXLen)
3508
3509define void @test_sf_vc_fvvf_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
3510; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4:
3511; CHECK:       # %bb.0: # %entry
3512; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
3513; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
3514; CHECK-NEXT:    ret
3515entry:
3516  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
3517  ret void
3518}
3519
3520declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, half, iXLen)
3521
3522define <vscale x 16 x half> @test_sf_vc_fv_fvvf_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
3523; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4:
3524; CHECK:       # %bb.0: # %entry
3525; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
3526; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
3527; CHECK-NEXT:    ret
3528entry:
3529  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
3530  ret <vscale x 16 x half> %0
3531}
3532
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, half, iXLen)
3534
3535define void @test_sf_vc_fvvf_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl) {
3536; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8:
3537; CHECK:       # %bb.0: # %entry
3538; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
3539; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
3540; CHECK-NEXT:    ret
3541entry:
3542  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl)
3543  ret void
3544}
3545
3546declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, half, iXLen)
3547
3548define <vscale x 32 x half> @test_sf_vc_fv_fvvf_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl) {
3549; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8:
3550; CHECK:       # %bb.0: # %entry
3551; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
3552; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
3553; CHECK-NEXT:    ret
3554entry:
3555  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl)
3556  ret <vscale x 32 x half> %0
3557}
3558
; Declaration uses bare parameter types, matching every other declare in this file
; (the stray "%rs1" name was legal but meaningless in a declaration).
declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, half, iXLen)
3560
; Side-effect-only (void) fvv intrinsic at e32/mf2 (f32 element, fractional
; LMUL): expects vsetvli with "ta", then sf.vc.fvv 1 with v8/v9 and fa0.
define void @test_sf_vc_fvvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, float, iXLen)
3573
; Value-returning sf.vc.v.fvv at e32/mf2: result returned in %vd's register;
; the vsetvli uses the tail-undisturbed ("tu") policy.
define <vscale x 1 x float> @test_sf_vc_fv_fvvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, float %rs1, iXLen)
3586
; Side-effect-only (void) fvv intrinsic at e32/m1: expects vsetvli with "ta",
; then sf.vc.fvv 1 with v8/v9 and fa0.
define void @test_sf_vc_fvvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, float, iXLen)
3599
; Value-returning sf.vc.v.fvv at e32/m1: result returned in %vd's register;
; the vsetvli uses the tail-undisturbed ("tu") policy.
define <vscale x 2 x float> @test_sf_vc_fv_fvvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, float %rs1, iXLen)
3612
; Side-effect-only (void) fvv intrinsic at e32/m2: expects vsetvli with "ta",
; then sf.vc.fvv 1 with v8/v10 (register pair spacing per LMUL=2) and fa0.
define void @test_sf_vc_fvvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, float, iXLen)
3625
; Value-returning sf.vc.v.fvv at e32/m2: result returned in %vd's register;
; the vsetvli uses the tail-undisturbed ("tu") policy.
define <vscale x 4 x float> @test_sf_vc_fv_fvvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, float %rs1, iXLen)
3638
; Side-effect-only (void) fvv intrinsic at e32/m4: expects vsetvli with "ta",
; then sf.vc.fvv 1 with v8/v12 and fa0.
define void @test_sf_vc_fvvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, float, iXLen)
3651
; Value-returning sf.vc.v.fvv at e32/m4: result returned in %vd's register;
; the vsetvli uses the tail-undisturbed ("tu") policy.
define <vscale x 8 x float> @test_sf_vc_fv_fvvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, float %rs1, iXLen)
3664
; Side-effect-only (void) fvv intrinsic at e32/m8: expects vsetvli with "ta",
; then sf.vc.fvv 1 with v8/v16 and fa0.
define void @test_sf_vc_fvvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, float, iXLen)
3677
; Value-returning sf.vc.v.fvv at e32/m8: result returned in %vd's register;
; the vsetvli uses the tail-undisturbed ("tu") policy.
define <vscale x 16 x float> @test_sf_vc_fv_fvvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, float %rs1, iXLen)
3690