; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
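;
; Check that llvm.masked.store intrinsics on scalable integer vector types are
; lowered to a single VLMAX vsetvli followed by a masked vse{8,16,32,64}.v,
; with SEW taken from the element width and LMUL from the per-vscale element
; count (e.g. nxv1i8 uses e8/mf8 and nxv8i64 uses e64/m8). The CHECK lines can
; be regenerated with utils/update_llc_test_checks.py after codegen changes.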

define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %a, i32 1, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i8.p0(<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i16.p0(<vscale x 1 x i16> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i16.p0(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i32.p0(<vscale x 1 x i32> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i32.p0(<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i16(<vscale x 2 x i16> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> %val, ptr %a, i32 1, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i16(<vscale x 4 x i16> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> %val, ptr %a, i32 1, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i16(<vscale x 8 x i16> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> %val, ptr %a, i32 4, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> %val, ptr %a, i32 1, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16i16(<vscale x 16 x i16> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i16.p0(<vscale x 16 x i16> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i16.p0(<vscale x 16 x i16>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16i32(<vscale x 16 x i32> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16i32.p0(<vscale x 16 x i32> %val, ptr %a, i32 4, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16i32.p0(<vscale x 16 x i32>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv32i8(<vscale x 32 x i8> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> %val, ptr %a, i32 1, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8>, ptr, i32, <vscale x 32 x i1>)

define void @masked_store_nxv32i16(<vscale x 32 x i16> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32i16.p0(<vscale x 32 x i16> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32i16.p0(<vscale x 32 x i16>, ptr, i32, <vscale x 32 x i1>)

define void @masked_store_nxv64i8(<vscale x 64 x i8> %val, ptr %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> %val, ptr %a, i32 1, <vscale x 64 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8>, ptr, i32, <vscale x 64 x i1>)

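; A masked store whose mask is constant all-zeroes writes no elements, so it
; should be folded away completely and emit nothing but the return.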
define void @masked_store_zero_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
; CHECK-LABEL: masked_store_zero_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer)
  ret void
}

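; A masked store whose mask is constant all-ones should be lowered to an
; ordinary unmasked vse8.v instead of a masked store.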
define void @masked_store_allones_mask(<vscale x 2 x i8> %val, ptr %a) nounwind {
; CHECK-LABEL: masked_store_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1))
  ret void
}