; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64
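; These tests exercise the fixed-vector lowering of llvm.masked.compressstore,
; which stores the mask-selected elements contiguously starting at the base
; pointer. For f16, f32, and f64 elements the checked codegen compresses the
; active elements with vcompress.vm, counts the set mask bits with vcpop.m,
; and then stores that many elements with a unit-stride vse16/vse32/vse64.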

declare void @llvm.masked.compressstore.v1f16(<1 x half>, ptr, <1 x i1>)
define void @compressstore_v1f16(ptr %base, <1 x half> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f16(<1 x half> %v, ptr align 2 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f16(<2 x half>, ptr, <2 x i1>)
define void @compressstore_v2f16(ptr %base, <2 x half> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f16(<2 x half> %v, ptr align 2 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f16(<4 x half>, ptr, <4 x i1>)
define void @compressstore_v4f16(ptr %base, <4 x half> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f16(<4 x half> %v, ptr align 2 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f16(<8 x half>, ptr, <8 x i1>)
define void @compressstore_v8f16(ptr %base, <8 x half> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT:    vse16.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT:    vse16.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f16(<8 x half> %v, ptr align 2 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1f32(<1 x float>, ptr, <1 x i1>)
define void @compressstore_v1f32(ptr %base, <1 x float> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f32(<1 x float> %v, ptr align 4 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f32(<2 x float>, ptr, <2 x i1>)
define void @compressstore_v2f32(ptr %base, <2 x float> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f32(<2 x float> %v, ptr align 4 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f32(<4 x float>, ptr, <4 x i1>)
define void @compressstore_v4f32(ptr %base, <4 x float> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; RV32-NEXT:    vse32.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; RV64-NEXT:    vse32.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f32(<4 x float> %v, ptr align 4 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f32(<8 x float>, ptr, <8 x i1>)
define void @compressstore_v8f32(ptr %base, <8 x float> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vcompress.vm v10, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT:    vse32.v v10, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT:    vcompress.vm v10, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT:    vse32.v v10, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f32(<8 x float> %v, ptr align 4 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1f64(<1 x double>, ptr, <1 x i1>)
define void @compressstore_v1f64(ptr %base, <1 x double> %v, <1 x i1> %mask) {
; RV32-LABEL: compressstore_v1f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vse64.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v1f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vse64.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v1f64(<1 x double> %v, ptr align 8 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2f64(<2 x double>, ptr, <2 x i1>)
define void @compressstore_v2f64(ptr %base, <2 x double> %v, <2 x i1> %mask) {
; RV32-LABEL: compressstore_v2f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vcompress.vm v9, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vse64.v v9, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v2f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vcompress.vm v9, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vse64.v v9, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v2f64(<2 x double> %v, ptr align 8 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4f64(<4 x double>, ptr, <4 x i1>)
define void @compressstore_v4f64(ptr %base, <4 x double> %v, <4 x i1> %mask) {
; RV32-LABEL: compressstore_v4f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vcompress.vm v10, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV32-NEXT:    vse64.v v10, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v4f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vcompress.vm v10, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vse64.v v10, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v4f64(<4 x double> %v, ptr align 8 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8f64(<8 x double>, ptr, <8 x i1>)
define void @compressstore_v8f64(ptr %base, <8 x double> %v, <8 x i1> %mask) {
; RV32-LABEL: compressstore_v8f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vcompress.vm v12, v8, v0
; RV32-NEXT:    vcpop.m a1, v0
; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT:    vse64.v v12, (a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: compressstore_v8f64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT:    vcompress.vm v12, v8, v0
; RV64-NEXT:    vcpop.m a1, v0
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vse64.v v12, (a0)
; RV64-NEXT:    ret
  call void @llvm.masked.compressstore.v8f64(<8 x double> %v, ptr align 8 %base, <8 x i1> %mask)
  ret void
}