; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s
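
; These tests check that @llvm.experimental.vector.compress lowers to a single
; vcompress.vm under the matching vsetvli. With an undef passthru the result
; is produced in a fresh register group under a tail-agnostic (ta) policy and
; then moved back into v8, since vcompress.vm does not allow its destination
; to overlap its sources.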

; Vector compress for i8 type

define <vscale x 1 x i8> @vector_compress_nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i8> @llvm.experimental.vector.compress.nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
  ret <vscale x 1 x i8> %ret
}

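; With a non-undef passthru, vcompress.vm can write the passthru register
; group in place under a tail-undisturbed (tu) policy, so the tail of
; %passthru is preserved and no extra register move is needed.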
define <vscale x 1 x i8> @vector_compress_nxv1i8_passthru(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i8> @llvm.experimental.vector.compress.nxv1i8(<vscale x 1 x i8> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i8> %passthru)
  ret <vscale x 1 x i8> %ret
}

define <vscale x 2 x i8> @vector_compress_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i8> @llvm.experimental.vector.compress.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %ret
}

define <vscale x 2 x i8> @vector_compress_nxv2i8_passthru(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i8> @llvm.experimental.vector.compress.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %passthru)
  ret <vscale x 2 x i8> %ret
}

define <vscale x 4 x i8> @vector_compress_nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i8> @llvm.experimental.vector.compress.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %ret
}

define <vscale x 4 x i8> @vector_compress_nxv4i8_passthru(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i8> @llvm.experimental.vector.compress.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i8> %passthru)
  ret <vscale x 4 x i8> %ret
}

define <vscale x 8 x i8> @vector_compress_nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i8> @llvm.experimental.vector.compress.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  ret <vscale x 8 x i8> %ret
}

define <vscale x 8 x i8> @vector_compress_nxv8i8_passthru(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i8> @llvm.experimental.vector.compress.nxv8i8(<vscale x 8 x i8> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i8> %passthru)
  ret <vscale x 8 x i8> %ret
}

define <vscale x 16 x i8> @vector_compress_nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i8> @llvm.experimental.vector.compress.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 16 x i8> @vector_compress_nxv16i8_passthru(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i8> @llvm.experimental.vector.compress.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i8> %passthru)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 32 x i8> @vector_compress_nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i8> @llvm.experimental.vector.compress.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %ret
}

define <vscale x 32 x i8> @vector_compress_nxv32i8_passthru(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i8> @llvm.experimental.vector.compress.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i8> %passthru)
  ret <vscale x 32 x i8> %ret
}

define <vscale x 64 x i8> @vector_compress_nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 64 x i8> @llvm.experimental.vector.compress.nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
  ret <vscale x 64 x i8> %ret
}

define <vscale x 64 x i8> @vector_compress_nxv64i8_passthru(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %data, <vscale x 64 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv64i8_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 64 x i8> @llvm.experimental.vector.compress.nxv64i8(<vscale x 64 x i8> %data, <vscale x 64 x i1> %mask, <vscale x 64 x i8> %passthru)
  ret <vscale x 64 x i8> %ret
}

; Vector compress for i16 type

define <vscale x 1 x i16> @vector_compress_nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i16> @llvm.experimental.vector.compress.nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
  ret <vscale x 1 x i16> %ret
}

define <vscale x 1 x i16> @vector_compress_nxv1i16_passthru(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i16> @llvm.experimental.vector.compress.nxv1i16(<vscale x 1 x i16> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i16> %passthru)
  ret <vscale x 1 x i16> %ret
}

define <vscale x 2 x i16> @vector_compress_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i16> @llvm.experimental.vector.compress.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %ret
}

define <vscale x 2 x i16> @vector_compress_nxv2i16_passthru(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i16> @llvm.experimental.vector.compress.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i16> %passthru)
  ret <vscale x 2 x i16> %ret
}

define <vscale x 4 x i16> @vector_compress_nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  ret <vscale x 4 x i16> %ret
}

define <vscale x 4 x i16> @vector_compress_nxv4i16_passthru(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i16> @llvm.experimental.vector.compress.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i16> %passthru)
  ret <vscale x 4 x i16> %ret
}

define <vscale x 8 x i16> @vector_compress_nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i16> @llvm.experimental.vector.compress.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @vector_compress_nxv8i16_passthru(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i16> @llvm.experimental.vector.compress.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 16 x i16> @vector_compress_nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i16> @llvm.experimental.vector.compress.nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
  ret <vscale x 16 x i16> %ret
}

define <vscale x 16 x i16> @vector_compress_nxv16i16_passthru(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i16> @llvm.experimental.vector.compress.nxv16i16(<vscale x 16 x i16> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i16> %passthru)
  ret <vscale x 16 x i16> %ret
}

define <vscale x 32 x i16> @vector_compress_nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i16> @llvm.experimental.vector.compress.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %ret
}

define <vscale x 32 x i16> @vector_compress_nxv32i16_passthru(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32i16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x i16> @llvm.experimental.vector.compress.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i1> %mask, <vscale x 32 x i16> %passthru)
  ret <vscale x 32 x i16> %ret
}

; Vector compress for i32 type

define <vscale x 1 x i32> @vector_compress_nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i32> @llvm.experimental.vector.compress.nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
  ret <vscale x 1 x i32> %ret
}

define <vscale x 1 x i32> @vector_compress_nxv1i32_passthru(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i32> @llvm.experimental.vector.compress.nxv1i32(<vscale x 1 x i32> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i32> %passthru)
  ret <vscale x 1 x i32> %ret
}

define <vscale x 2 x i32> @vector_compress_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i32> @llvm.experimental.vector.compress.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %ret
}

define <vscale x 2 x i32> @vector_compress_nxv2i32_passthru(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i32> @llvm.experimental.vector.compress.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
  ret <vscale x 2 x i32> %ret
}

define <vscale x 4 x i32> @vector_compress_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @vector_compress_nxv4i32_passthru(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i32> @llvm.experimental.vector.compress.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 8 x i32> @vector_compress_nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i32> @llvm.experimental.vector.compress.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %ret
}

define <vscale x 8 x i32> @vector_compress_nxv8i32_passthru(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i32> @llvm.experimental.vector.compress.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %ret
}

define <vscale x 16 x i32> @vector_compress_nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i32> @llvm.experimental.vector.compress.nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
  ret <vscale x 16 x i32> %ret
}

define <vscale x 16 x i32> @vector_compress_nxv16i32_passthru(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16i32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x i32> @llvm.experimental.vector.compress.nxv16i32(<vscale x 16 x i32> %data, <vscale x 16 x i1> %mask, <vscale x 16 x i32> %passthru)
  ret <vscale x 16 x i32> %ret
}

; Vector compress for i64 type
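; The lowering is identical on riscv32 and riscv64 even for e64 elements:
; vcompress.vm moves whole elements regardless of XLEN, so one set of CHECK
; lines covers both RUN targets.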

define <vscale x 1 x i64> @vector_compress_nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i64> @llvm.experimental.vector.compress.nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
  ret <vscale x 1 x i64> %ret
}

define <vscale x 1 x i64> @vector_compress_nxv1i64_passthru(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x i64> @llvm.experimental.vector.compress.nxv1i64(<vscale x 1 x i64> %data, <vscale x 1 x i1> %mask, <vscale x 1 x i64> %passthru)
  ret <vscale x 1 x i64> %ret
}

define <vscale x 2 x i64> @vector_compress_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i64> @llvm.experimental.vector.compress.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @vector_compress_nxv2i64_passthru(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x i64> @llvm.experimental.vector.compress.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, <vscale x 2 x i64> %passthru)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 4 x i64> @vector_compress_nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i64> @llvm.experimental.vector.compress.nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
  ret <vscale x 4 x i64> %ret
}

define <vscale x 4 x i64> @vector_compress_nxv4i64_passthru(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x i64> @llvm.experimental.vector.compress.nxv4i64(<vscale x 4 x i64> %data, <vscale x 4 x i1> %mask, <vscale x 4 x i64> %passthru)
  ret <vscale x 4 x i64> %ret
}

define <vscale x 8 x i64> @vector_compress_nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i64> @llvm.experimental.vector.compress.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %ret
}

define <vscale x 8 x i64> @vector_compress_nxv8i64_passthru(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8i64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x i64> @llvm.experimental.vector.compress.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i1> %mask, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %ret
}

; Vector compress for bf16 type
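; Compressing bf16 vectors only requires +zvfbfmin: vcompress.vm moves the
; 16-bit elements as opaque bits and never performs bf16 arithmetic.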

define <vscale x 1 x bfloat> @vector_compress_nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x bfloat> @llvm.experimental.vector.compress.nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask, <vscale x 1 x bfloat> undef)
  ret <vscale x 1 x bfloat> %ret
}

define <vscale x 1 x bfloat> @vector_compress_nxv1bf16_passthru(<vscale x 1 x bfloat> %passthru, <vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x bfloat> @llvm.experimental.vector.compress.nxv1bf16(<vscale x 1 x bfloat> %data, <vscale x 1 x i1> %mask, <vscale x 1 x bfloat> %passthru)
  ret <vscale x 1 x bfloat> %ret
}

define <vscale x 2 x bfloat> @vector_compress_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x bfloat> @llvm.experimental.vector.compress.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
  ret <vscale x 2 x bfloat> %ret
}

define <vscale x 2 x bfloat> @vector_compress_nxv2bf16_passthru(<vscale x 2 x bfloat> %passthru, <vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x bfloat> @llvm.experimental.vector.compress.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> %passthru)
  ret <vscale x 2 x bfloat> %ret
}

define <vscale x 4 x bfloat> @vector_compress_nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x bfloat> @llvm.experimental.vector.compress.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
  ret <vscale x 4 x bfloat> %ret
}

define <vscale x 4 x bfloat> @vector_compress_nxv4bf16_passthru(<vscale x 4 x bfloat> %passthru, <vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x bfloat> @llvm.experimental.vector.compress.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> %passthru)
  ret <vscale x 4 x bfloat> %ret
}

define <vscale x 8 x bfloat> @vector_compress_nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x bfloat> @llvm.experimental.vector.compress.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
  ret <vscale x 8 x bfloat> %ret
}

define <vscale x 8 x bfloat> @vector_compress_nxv8bf16_passthru(<vscale x 8 x bfloat> %passthru, <vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x bfloat> @llvm.experimental.vector.compress.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> %passthru)
  ret <vscale x 8 x bfloat> %ret
}

define <vscale x 16 x bfloat> @vector_compress_nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x bfloat> @llvm.experimental.vector.compress.nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask, <vscale x 16 x bfloat> undef)
  ret <vscale x 16 x bfloat> %ret
}

define <vscale x 16 x bfloat> @vector_compress_nxv16bf16_passthru(<vscale x 16 x bfloat> %passthru, <vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x bfloat> @llvm.experimental.vector.compress.nxv16bf16(<vscale x 16 x bfloat> %data, <vscale x 16 x i1> %mask, <vscale x 16 x bfloat> %passthru)
  ret <vscale x 16 x bfloat> %ret
}

define <vscale x 32 x bfloat> @vector_compress_nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x bfloat> @llvm.experimental.vector.compress.nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask, <vscale x 32 x bfloat> undef)
  ret <vscale x 32 x bfloat> %ret
}

define <vscale x 32 x bfloat> @vector_compress_nxv32bf16_passthru(<vscale x 32 x bfloat> %passthru, <vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv32bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 32 x bfloat> @llvm.experimental.vector.compress.nxv32bf16(<vscale x 32 x bfloat> %data, <vscale x 32 x i1> %mask, <vscale x 32 x bfloat> %passthru)
  ret <vscale x 32 x bfloat> %ret
}

; Vector compress for f16 type
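; The f16 lowering is the same with +zvfh and +zvfhmin (all RUN lines share
; the plain CHECK prefix), since compression never inspects element values.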
606b799cc34SPengcheng Wang
607b799cc34SPengcheng Wangdefine <vscale x 1 x half> @vector_compress_nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask) {
608b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv1f16:
609b799cc34SPengcheng Wang; CHECK:       # %bb.0:
610b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
611b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v9, v8, v0
612b799cc34SPengcheng Wang; CHECK-NEXT:    vmv1r.v v8, v9
613b799cc34SPengcheng Wang; CHECK-NEXT:    ret
614b799cc34SPengcheng Wang  %ret = call <vscale x 1 x half> @llvm.experimental.vector.compress.nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
615b799cc34SPengcheng Wang  ret <vscale x 1 x half> %ret
616b799cc34SPengcheng Wang}
617b799cc34SPengcheng Wang
618b799cc34SPengcheng Wangdefine <vscale x 1 x half> @vector_compress_nxv1f16_passthru(<vscale x 1 x half> %passthru, <vscale x 1 x half> %data, <vscale x 1 x i1> %mask) {
619b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv1f16_passthru:
620b799cc34SPengcheng Wang; CHECK:       # %bb.0:
621b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
622b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v9, v0
623b799cc34SPengcheng Wang; CHECK-NEXT:    ret
624b799cc34SPengcheng Wang  %ret = call <vscale x 1 x half> @llvm.experimental.vector.compress.nxv1f16(<vscale x 1 x half> %data, <vscale x 1 x i1> %mask, <vscale x 1 x half> %passthru)
625b799cc34SPengcheng Wang  ret <vscale x 1 x half> %ret
626b799cc34SPengcheng Wang}
627b799cc34SPengcheng Wang
628b799cc34SPengcheng Wangdefine <vscale x 2 x half> @vector_compress_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask) {
629b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv2f16:
630b799cc34SPengcheng Wang; CHECK:       # %bb.0:
631b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
632b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v9, v8, v0
633b799cc34SPengcheng Wang; CHECK-NEXT:    vmv1r.v v8, v9
634b799cc34SPengcheng Wang; CHECK-NEXT:    ret
635b799cc34SPengcheng Wang  %ret = call <vscale x 2 x half> @llvm.experimental.vector.compress.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
636b799cc34SPengcheng Wang  ret <vscale x 2 x half> %ret
637b799cc34SPengcheng Wang}
638b799cc34SPengcheng Wang
639b799cc34SPengcheng Wangdefine <vscale x 2 x half> @vector_compress_nxv2f16_passthru(<vscale x 2 x half> %passthru, <vscale x 2 x half> %data, <vscale x 2 x i1> %mask) {
640b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv2f16_passthru:
641b799cc34SPengcheng Wang; CHECK:       # %bb.0:
642b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
643b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v9, v0
644b799cc34SPengcheng Wang; CHECK-NEXT:    ret
645b799cc34SPengcheng Wang  %ret = call <vscale x 2 x half> @llvm.experimental.vector.compress.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i1> %mask, <vscale x 2 x half> %passthru)
646b799cc34SPengcheng Wang  ret <vscale x 2 x half> %ret
647b799cc34SPengcheng Wang}
648b799cc34SPengcheng Wang
649b799cc34SPengcheng Wangdefine <vscale x 4 x half> @vector_compress_nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask) {
650b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv4f16:
651b799cc34SPengcheng Wang; CHECK:       # %bb.0:
652b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
653b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v9, v8, v0
654b799cc34SPengcheng Wang; CHECK-NEXT:    vmv.v.v v8, v9
655b799cc34SPengcheng Wang; CHECK-NEXT:    ret
656b799cc34SPengcheng Wang  %ret = call <vscale x 4 x half> @llvm.experimental.vector.compress.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
657b799cc34SPengcheng Wang  ret <vscale x 4 x half> %ret
658b799cc34SPengcheng Wang}
659b799cc34SPengcheng Wang
660b799cc34SPengcheng Wangdefine <vscale x 4 x half> @vector_compress_nxv4f16_passthru(<vscale x 4 x half> %passthru, <vscale x 4 x half> %data, <vscale x 4 x i1> %mask) {
661b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv4f16_passthru:
662b799cc34SPengcheng Wang; CHECK:       # %bb.0:
663b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
664b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v9, v0
665b799cc34SPengcheng Wang; CHECK-NEXT:    ret
666b799cc34SPengcheng Wang  %ret = call <vscale x 4 x half> @llvm.experimental.vector.compress.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x i1> %mask, <vscale x 4 x half> %passthru)
667b799cc34SPengcheng Wang  ret <vscale x 4 x half> %ret
668b799cc34SPengcheng Wang}
669b799cc34SPengcheng Wang
670b799cc34SPengcheng Wangdefine <vscale x 8 x half> @vector_compress_nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask) {
671b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv8f16:
672b799cc34SPengcheng Wang; CHECK:       # %bb.0:
673b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
674b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v10, v8, v0
675b799cc34SPengcheng Wang; CHECK-NEXT:    vmv.v.v v8, v10
676b799cc34SPengcheng Wang; CHECK-NEXT:    ret
677b799cc34SPengcheng Wang  %ret = call <vscale x 8 x half> @llvm.experimental.vector.compress.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
678b799cc34SPengcheng Wang  ret <vscale x 8 x half> %ret
679b799cc34SPengcheng Wang}
680b799cc34SPengcheng Wang
681b799cc34SPengcheng Wangdefine <vscale x 8 x half> @vector_compress_nxv8f16_passthru(<vscale x 8 x half> %passthru, <vscale x 8 x half> %data, <vscale x 8 x i1> %mask) {
682b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv8f16_passthru:
683b799cc34SPengcheng Wang; CHECK:       # %bb.0:
684b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
685b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v10, v0
686b799cc34SPengcheng Wang; CHECK-NEXT:    ret
687b799cc34SPengcheng Wang  %ret = call <vscale x 8 x half> @llvm.experimental.vector.compress.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, <vscale x 8 x half> %passthru)
688b799cc34SPengcheng Wang  ret <vscale x 8 x half> %ret
689b799cc34SPengcheng Wang}
690b799cc34SPengcheng Wang
691b799cc34SPengcheng Wangdefine <vscale x 16 x half> @vector_compress_nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask) {
692b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv16f16:
693b799cc34SPengcheng Wang; CHECK:       # %bb.0:
694b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
695b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v12, v8, v0
696b799cc34SPengcheng Wang; CHECK-NEXT:    vmv.v.v v8, v12
697b799cc34SPengcheng Wang; CHECK-NEXT:    ret
698b799cc34SPengcheng Wang  %ret = call <vscale x 16 x half> @llvm.experimental.vector.compress.nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
699b799cc34SPengcheng Wang  ret <vscale x 16 x half> %ret
700b799cc34SPengcheng Wang}
701b799cc34SPengcheng Wang
702b799cc34SPengcheng Wangdefine <vscale x 16 x half> @vector_compress_nxv16f16_passthru(<vscale x 16 x half> %passthru, <vscale x 16 x half> %data, <vscale x 16 x i1> %mask) {
703b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv16f16_passthru:
704b799cc34SPengcheng Wang; CHECK:       # %bb.0:
705b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
706b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v12, v0
707b799cc34SPengcheng Wang; CHECK-NEXT:    ret
708b799cc34SPengcheng Wang  %ret = call <vscale x 16 x half> @llvm.experimental.vector.compress.nxv16f16(<vscale x 16 x half> %data, <vscale x 16 x i1> %mask, <vscale x 16 x half> %passthru)
709b799cc34SPengcheng Wang  ret <vscale x 16 x half> %ret
710b799cc34SPengcheng Wang}
711b799cc34SPengcheng Wang
712b799cc34SPengcheng Wangdefine <vscale x 32 x half> @vector_compress_nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask) {
713b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv32f16:
714b799cc34SPengcheng Wang; CHECK:       # %bb.0:
715b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
716b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v16, v8, v0
717b799cc34SPengcheng Wang; CHECK-NEXT:    vmv.v.v v8, v16
718b799cc34SPengcheng Wang; CHECK-NEXT:    ret
719b799cc34SPengcheng Wang  %ret = call <vscale x 32 x half> @llvm.experimental.vector.compress.nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
720b799cc34SPengcheng Wang  ret <vscale x 32 x half> %ret
721b799cc34SPengcheng Wang}
722b799cc34SPengcheng Wang
723b799cc34SPengcheng Wangdefine <vscale x 32 x half> @vector_compress_nxv32f16_passthru(<vscale x 32 x half> %passthru, <vscale x 32 x half> %data, <vscale x 32 x i1> %mask) {
724b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv32f16_passthru:
725b799cc34SPengcheng Wang; CHECK:       # %bb.0:
726b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e16, m8, tu, ma
727b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v16, v0
728b799cc34SPengcheng Wang; CHECK-NEXT:    ret
729b799cc34SPengcheng Wang  %ret = call <vscale x 32 x half> @llvm.experimental.vector.compress.nxv32f16(<vscale x 32 x half> %data, <vscale x 32 x i1> %mask, <vscale x 32 x half> %passthru)
730b799cc34SPengcheng Wang  ret <vscale x 32 x half> %ret
731b799cc34SPengcheng Wang}
732b799cc34SPengcheng Wang
733b799cc34SPengcheng Wang; Vector compress for f32 type
734b799cc34SPengcheng Wang
735b799cc34SPengcheng Wangdefine <vscale x 1 x float> @vector_compress_nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask) {
736b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv1f32:
737b799cc34SPengcheng Wang; CHECK:       # %bb.0:
738b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
739b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v9, v8, v0
740b799cc34SPengcheng Wang; CHECK-NEXT:    vmv1r.v v8, v9
741b799cc34SPengcheng Wang; CHECK-NEXT:    ret
742b799cc34SPengcheng Wang  %ret = call <vscale x 1 x float> @llvm.experimental.vector.compress.nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
743b799cc34SPengcheng Wang  ret <vscale x 1 x float> %ret
744b799cc34SPengcheng Wang}
745b799cc34SPengcheng Wang
746b799cc34SPengcheng Wangdefine <vscale x 1 x float> @vector_compress_nxv1f32_passthru(<vscale x 1 x float> %passthru, <vscale x 1 x float> %data, <vscale x 1 x i1> %mask) {
747b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv1f32_passthru:
748b799cc34SPengcheng Wang; CHECK:       # %bb.0:
749b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, tu, ma
750b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v8, v9, v0
751b799cc34SPengcheng Wang; CHECK-NEXT:    ret
752b799cc34SPengcheng Wang  %ret = call <vscale x 1 x float> @llvm.experimental.vector.compress.nxv1f32(<vscale x 1 x float> %data, <vscale x 1 x i1> %mask, <vscale x 1 x float> %passthru)
753b799cc34SPengcheng Wang  ret <vscale x 1 x float> %ret
754b799cc34SPengcheng Wang}
755b799cc34SPengcheng Wang
756b799cc34SPengcheng Wangdefine <vscale x 2 x float> @vector_compress_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask) {
757b799cc34SPengcheng Wang; CHECK-LABEL: vector_compress_nxv2f32:
758b799cc34SPengcheng Wang; CHECK:       # %bb.0:
759b799cc34SPengcheng Wang; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
760b799cc34SPengcheng Wang; CHECK-NEXT:    vcompress.vm v9, v8, v0
761b799cc34SPengcheng Wang; CHECK-NEXT:    vmv.v.v v8, v9
762b799cc34SPengcheng Wang; CHECK-NEXT:    ret
763b799cc34SPengcheng Wang  %ret = call <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
764b799cc34SPengcheng Wang  ret <vscale x 2 x float> %ret
765b799cc34SPengcheng Wang}

define <vscale x 2 x float> @vector_compress_nxv2f32_passthru(<vscale x 2 x float> %passthru, <vscale x 2 x float> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x float> @llvm.experimental.vector.compress.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i1> %mask, <vscale x 2 x float> %passthru)
  ret <vscale x 2 x float> %ret
}

define <vscale x 4 x float> @vector_compress_nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x float> @llvm.experimental.vector.compress.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @vector_compress_nxv4f32_passthru(<vscale x 4 x float> %passthru, <vscale x 4 x float> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x float> @llvm.experimental.vector.compress.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthru)
  ret <vscale x 4 x float> %ret
}

define <vscale x 8 x float> @vector_compress_nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x float> @llvm.experimental.vector.compress.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
  ret <vscale x 8 x float> %ret
}

define <vscale x 8 x float> @vector_compress_nxv8f32_passthru(<vscale x 8 x float> %passthru, <vscale x 8 x float> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x float> @llvm.experimental.vector.compress.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x i1> %mask, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %ret
}

define <vscale x 16 x float> @vector_compress_nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x float> @llvm.experimental.vector.compress.nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
  ret <vscale x 16 x float> %ret
}

define <vscale x 16 x float> @vector_compress_nxv16f32_passthru(<vscale x 16 x float> %passthru, <vscale x 16 x float> %data, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv16f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 16 x float> @llvm.experimental.vector.compress.nxv16f32(<vscale x 16 x float> %data, <vscale x 16 x i1> %mask, <vscale x 16 x float> %passthru)
  ret <vscale x 16 x float> %ret
}

; Vector compress for f64 type
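
; With e64 there is no legal fractional LMUL (LMUL must be >= SEW/ELEN), so
; the smallest type here, <vscale x 1 x double>, already occupies a full m1
; register and the copy-back uses vmv.v.v throughout this section.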

define <vscale x 1 x double> @vector_compress_nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x double> @llvm.experimental.vector.compress.nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
  ret <vscale x 1 x double> %ret
}

define <vscale x 1 x double> @vector_compress_nxv1f64_passthru(<vscale x 1 x double> %passthru, <vscale x 1 x double> %data, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv1f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 1 x double> @llvm.experimental.vector.compress.nxv1f64(<vscale x 1 x double> %data, <vscale x 1 x i1> %mask, <vscale x 1 x double> %passthru)
  ret <vscale x 1 x double> %ret
}

define <vscale x 2 x double> @vector_compress_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x double> @llvm.experimental.vector.compress.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @vector_compress_nxv2f64_passthru(<vscale x 2 x double> %passthru, <vscale x 2 x double> %data, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv2f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 2 x double> @llvm.experimental.vector.compress.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, <vscale x 2 x double> %passthru)
  ret <vscale x 2 x double> %ret
}

define <vscale x 4 x double> @vector_compress_nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x double> @llvm.experimental.vector.compress.nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
  ret <vscale x 4 x double> %ret
}

define <vscale x 4 x double> @vector_compress_nxv4f64_passthru(<vscale x 4 x double> %passthru, <vscale x 4 x double> %data, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv4f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 4 x double> @llvm.experimental.vector.compress.nxv4f64(<vscale x 4 x double> %data, <vscale x 4 x i1> %mask, <vscale x 4 x double> %passthru)
  ret <vscale x 4 x double> %ret
}

define <vscale x 8 x double> @vector_compress_nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vcompress.vm v16, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x double> @llvm.experimental.vector.compress.nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
  ret <vscale x 8 x double> %ret
}

define <vscale x 8 x double> @vector_compress_nxv8f64_passthru(<vscale x 8 x double> %passthru, <vscale x 8 x double> %data, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vector_compress_nxv8f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v16, v0
; CHECK-NEXT:    ret
  %ret = call <vscale x 8 x double> @llvm.experimental.vector.compress.nxv8f64(<vscale x 8 x double> %data, <vscale x 8 x i1> %mask, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %ret
}
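
; At e64/m8 the operands occupy full eight-register groups: the compress
; source sits in v8-v15 (or v16-v23 in the passthru case), LMUL=8 being the
; largest register grouping the ISA provides.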