; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s

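; The llvm.riscv.vcompress intrinsic lowers to vcompress.vm, which packs the
; elements of the source operand whose mask bits are set into contiguous
; low-indexed elements of the result; elements past the packed prefix keep the
; passthru (first) operand's values, which is why every test expects the
; tail-undisturbed ("tu") vsetvli policy.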
declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vcompress_vm_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    <vscale x 1 x bfloat> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vcompress_vm_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    <vscale x 2 x bfloat> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vcompress_vm_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    <vscale x 4 x bfloat> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vcompress_vm_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    <vscale x 8 x bfloat> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vcompress_vm_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vcompress_vm_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x bfloat> %a
}