; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

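; Test lowering of llvm.masked.compressstore for fixed-length integer vectors
; on RVV: each call should compress the data with vcompress.vm, count the
; active mask bits with vcpop.m, and store that many elements with a
; unit-stride vse<eew>.v. Codegen is identical on riscv32 and riscv64, which
; is why the RV32/RV64 prefixes end up unused.
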
declare void @llvm.masked.compressstore.v1i8(<1 x i8>, ptr, <1 x i1>)
define void @compressstore_v1i8(ptr %base, <1 x i8> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i8(<1 x i8> %v, ptr %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i8(<2 x i8>, ptr, <2 x i1>)
define void @compressstore_v2i8(ptr %base, <2 x i8> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i8(<2 x i8> %v, ptr %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i8(<4 x i8>, ptr, <4 x i1>)
define void @compressstore_v4i8(ptr %base, <4 x i8> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i8(<4 x i8> %v, ptr %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i8(<8 x i8>, ptr, <8 x i1>)
define void @compressstore_v8i8(ptr %base, <8 x i8> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vse8.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i8(<8 x i8> %v, ptr %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i16(<1 x i16>, ptr, <1 x i1>)
define void @compressstore_v1i16(ptr %base, <1 x i16> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i16(<1 x i16> %v, ptr align 2 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i16(<2 x i16>, ptr, <2 x i1>)
define void @compressstore_v2i16(ptr %base, <2 x i16> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i16(<2 x i16> %v, ptr align 2 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i16(<4 x i16>, ptr, <4 x i1>)
define void @compressstore_v4i16(ptr %base, <4 x i16> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i16(<4 x i16> %v, ptr align 2 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i16(<8 x i16>, ptr, <8 x i1>)
define void @compressstore_v8i16(ptr %base, <8 x i16> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i16(<8 x i16> %v, ptr align 2 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i32(<1 x i32>, ptr, <1 x i1>)
define void @compressstore_v1i32(ptr %base, <1 x i32> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i32(<1 x i32> %v, ptr align 4 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i32(<2 x i32>, ptr, <2 x i1>)
define void @compressstore_v2i32(ptr %base, <2 x i32> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i32(<2 x i32> %v, ptr align 4 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i32(<4 x i32>, ptr, <4 x i1>)
define void @compressstore_v4i32(ptr %base, <4 x i32> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr align 4 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i32(<8 x i32>, ptr, <8 x i1>)
define void @compressstore_v8i32(ptr %base, <8 x i32> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v10, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i32(<8 x i32> %v, ptr align 4 %base, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v1i64(<1 x i64>, ptr, <1 x i1>)
define void @compressstore_v1i64(ptr %base, <1 x i64> %v, <1 x i1> %mask) {
; CHECK-LABEL: compressstore_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v1i64(<1 x i64> %v, ptr align 8 %base, <1 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)
define void @compressstore_v2i64(ptr %base, <2 x i64> %v, <2 x i1> %mask) {
; CHECK-LABEL: compressstore_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v9, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v2i64(<2 x i64> %v, ptr align 8 %base, <2 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v4i64(<4 x i64>, ptr, <4 x i1>)
define void @compressstore_v4i64(ptr %base, <4 x i64> %v, <4 x i1> %mask) {
; CHECK-LABEL: compressstore_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vcompress.vm v10, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vse64.v v10, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v4i64(<4 x i64> %v, ptr align 8 %base, <4 x i1> %mask)
  ret void
}

declare void @llvm.masked.compressstore.v8i64(<8 x i64>, ptr, <8 x i1>)
define void @compressstore_v8i64(ptr %base, <8 x i64> %v, <8 x i1> %mask) {
; CHECK-LABEL: compressstore_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vcompress.vm v12, v8, v0
; CHECK-NEXT:    vcpop.m a1, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vse64.v v12, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.compressstore.v8i64(<8 x i64> %v, ptr align 8 %base, <8 x i1> %mask)
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}