; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin < %s | FileCheck %s

define <1 x bfloat> @vector_compress_v1bf16(<1 x bfloat> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <1 x bfloat> @llvm.experimental.vector.compress.v1bf16(<1 x bfloat> %v, <1 x i1> %mask, <1 x bfloat> undef)
  ret <1 x bfloat> %ret
}

define <1 x bfloat> @vector_compress_v1bf16_passthru(<1 x bfloat> %passthru, <1 x bfloat> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <1 x bfloat> @llvm.experimental.vector.compress.v1bf16(<1 x bfloat> %v, <1 x i1> %mask, <1 x bfloat> %passthru)
  ret <1 x bfloat> %ret
}

define <2 x bfloat> @vector_compress_v2bf16(<2 x bfloat> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <2 x bfloat> @llvm.experimental.vector.compress.v2bf16(<2 x bfloat> %v, <2 x i1> %mask, <2 x bfloat> undef)
  ret <2 x bfloat> %ret
}

define <2 x bfloat> @vector_compress_v2bf16_passthru(<2 x bfloat> %passthru, <2 x bfloat> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <2 x bfloat> @llvm.experimental.vector.compress.v2bf16(<2 x bfloat> %v, <2 x i1> %mask, <2 x bfloat> %passthru)
  ret <2 x bfloat> %ret
}

define <4 x bfloat> @vector_compress_v4bf16(<4 x bfloat> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <4 x bfloat> @llvm.experimental.vector.compress.v4bf16(<4 x bfloat> %v, <4 x i1> %mask, <4 x bfloat> undef)
  ret <4 x bfloat> %ret
}

define <4 x bfloat> @vector_compress_v4bf16_passthru(<4 x bfloat> %passthru, <4 x bfloat> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <4 x bfloat> @llvm.experimental.vector.compress.v4bf16(<4 x bfloat> %v, <4 x i1> %mask, <4 x bfloat> %passthru)
  ret <4 x bfloat> %ret
}

define <8 x bfloat> @vector_compress_v8bf16(<8 x bfloat> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <8 x bfloat> @llvm.experimental.vector.compress.v8bf16(<8 x bfloat> %v, <8 x i1> %mask, <8 x bfloat> undef)
  ret <8 x bfloat> %ret
}

define <8 x bfloat> @vector_compress_v8bf16_passthru(<8 x bfloat> %passthru, <8 x bfloat> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8bf16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <8 x bfloat> @llvm.experimental.vector.compress.v8bf16(<8 x bfloat> %v, <8 x i1> %mask, <8 x bfloat> %passthru)
  ret <8 x bfloat> %ret
}

define <1 x half> @vector_compress_v1f16(<1 x half> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <1 x half> @llvm.experimental.vector.compress.v1f16(<1 x half> %v, <1 x i1> %mask, <1 x half> undef)
  ret <1 x half> %ret
}

define <1 x half> @vector_compress_v1f16_passthru(<1 x half> %passthru, <1 x half> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <1 x half> @llvm.experimental.vector.compress.v1f16(<1 x half> %v, <1 x i1> %mask, <1 x half> %passthru)
  ret <1 x half> %ret
}

define <2 x half> @vector_compress_v2f16(<2 x half> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <2 x half> @llvm.experimental.vector.compress.v2f16(<2 x half> %v, <2 x i1> %mask, <2 x half> undef)
  ret <2 x half> %ret
}

define <2 x half> @vector_compress_v2f16_passthru(<2 x half> %passthru, <2 x half> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <2 x half> @llvm.experimental.vector.compress.v2f16(<2 x half> %v, <2 x i1> %mask, <2 x half> %passthru)
  ret <2 x half> %ret
}

define <4 x half> @vector_compress_v4f16(<4 x half> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <4 x half> @llvm.experimental.vector.compress.v4f16(<4 x half> %v, <4 x i1> %mask, <4 x half> undef)
  ret <4 x half> %ret
}

define <4 x half> @vector_compress_v4f16_passthru(<4 x half> %passthru, <4 x half> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <4 x half> @llvm.experimental.vector.compress.v4f16(<4 x half> %v, <4 x i1> %mask, <4 x half> %passthru)
  ret <4 x half> %ret
}

define <8 x half> @vector_compress_v8f16(<8 x half> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <8 x half> @llvm.experimental.vector.compress.v8f16(<8 x half> %v, <8 x i1> %mask, <8 x half> undef)
  ret <8 x half> %ret
}

define <8 x half> @vector_compress_v8f16_passthru(<8 x half> %passthru, <8 x half> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f16_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <8 x half> @llvm.experimental.vector.compress.v8f16(<8 x half> %v, <8 x i1> %mask, <8 x half> %passthru)
  ret <8 x half> %ret
}

define <1 x float> @vector_compress_v1f32(<1 x float> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <1 x float> @llvm.experimental.vector.compress.v1f32(<1 x float> %v, <1 x i1> %mask, <1 x float> undef)
  ret <1 x float> %ret
}

define <1 x float> @vector_compress_v1f32_passthru(<1 x float> %passthru, <1 x float> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <1 x float> @llvm.experimental.vector.compress.v1f32(<1 x float> %v, <1 x i1> %mask, <1 x float> %passthru)
  ret <1 x float> %ret
}

define <2 x float> @vector_compress_v2f32(<2 x float> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <2 x float> @llvm.experimental.vector.compress.v2f32(<2 x float> %v, <2 x i1> %mask, <2 x float> undef)
  ret <2 x float> %ret
}

define <2 x float> @vector_compress_v2f32_passthru(<2 x float> %passthru, <2 x float> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <2 x float> @llvm.experimental.vector.compress.v2f32(<2 x float> %v, <2 x i1> %mask, <2 x float> %passthru)
  ret <2 x float> %ret
}

define <4 x float> @vector_compress_v4f32(<4 x float> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <4 x float> @llvm.experimental.vector.compress.v4f32(<4 x float> %v, <4 x i1> %mask, <4 x float> undef)
  ret <4 x float> %ret
}

define <4 x float> @vector_compress_v4f32_passthru(<4 x float> %passthru, <4 x float> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <4 x float> @llvm.experimental.vector.compress.v4f32(<4 x float> %v, <4 x i1> %mask, <4 x float> %passthru)
  ret <4 x float> %ret
}

define <8 x float> @vector_compress_v8f32(<8 x float> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <8 x float> @llvm.experimental.vector.compress.v8f32(<8 x float> %v, <8 x i1> %mask, <8 x float> undef)
  ret <8 x float> %ret
}

define <8 x float> @vector_compress_v8f32_passthru(<8 x float> %passthru, <8 x float> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f32_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <8 x float> @llvm.experimental.vector.compress.v8f32(<8 x float> %v, <8 x i1> %mask, <8 x float> %passthru)
  ret <8 x float> %ret
}

define <1 x double> @vector_compress_v1f64(<1 x double> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <1 x double> @llvm.experimental.vector.compress.v1f64(<1 x double> %v, <1 x i1> %mask, <1 x double> undef)
  ret <1 x double> %ret
}

define <1 x double> @vector_compress_v1f64_passthru(<1 x double> %passthru, <1 x double> %v, <1 x i1> %mask) {
; CHECK-LABEL: vector_compress_v1f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <1 x double> @llvm.experimental.vector.compress.v1f64(<1 x double> %v, <1 x i1> %mask, <1 x double> %passthru)
  ret <1 x double> %ret
}

define <2 x double> @vector_compress_v2f64(<2 x double> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %ret = call <2 x double> @llvm.experimental.vector.compress.v2f64(<2 x double> %v, <2 x i1> %mask, <2 x double> undef)
  ret <2 x double> %ret
}

define <2 x double> @vector_compress_v2f64_passthru(<2 x double> %passthru, <2 x double> %v, <2 x i1> %mask) {
; CHECK-LABEL: vector_compress_v2f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v9, v0
; CHECK-NEXT:    ret
  %ret = call <2 x double> @llvm.experimental.vector.compress.v2f64(<2 x double> %v, <2 x i1> %mask, <2 x double> %passthru)
  ret <2 x double> %ret
}

define <4 x double> @vector_compress_v4f64(<4 x double> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %ret = call <4 x double> @llvm.experimental.vector.compress.v4f64(<4 x double> %v, <4 x i1> %mask, <4 x double> undef)
  ret <4 x double> %ret
}

define <4 x double> @vector_compress_v4f64_passthru(<4 x double> %passthru, <4 x double> %v, <4 x i1> %mask) {
; CHECK-LABEL: vector_compress_v4f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v10, v0
; CHECK-NEXT:    ret
  %ret = call <4 x double> @llvm.experimental.vector.compress.v4f64(<4 x double> %v, <4 x i1> %mask, <4 x double> %passthru)
  ret <4 x double> %ret
}

define <8 x double> @vector_compress_v8f64(<8 x double> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %ret = call <8 x double> @llvm.experimental.vector.compress.v8f64(<8 x double> %v, <8 x i1> %mask, <8 x double> undef)
  ret <8 x double> %ret
}

define <8 x double> @vector_compress_v8f64_passthru(<8 x double> %passthru, <8 x double> %v, <8 x i1> %mask) {
; CHECK-LABEL: vector_compress_v8f64_passthru:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, tu, ma
; CHECK-NEXT:    vcompress.vm v8, v12, v0
; CHECK-NEXT:    ret
  %ret = call <8 x double> @llvm.experimental.vector.compress.v8f64(<8 x double> %v, <8 x i1> %mask, <8 x double> %passthru)
  ret <8 x double> %ret
}