; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
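;
; The functions below exercise masked stores (llvm.masked.store) of scalable
; bf16/f16/f32/f64 vectors and check that each element type and vector length
; selects the expected vsetvli SEW/LMUL and a masked vse{16,32,64}.v store.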

define void @masked_store_nxv1bf16(<vscale x 1 x bfloat> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1bf16.p0(<vscale x 1 x bfloat> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1bf16.p0(<vscale x 1 x bfloat>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1f16(<vscale x 1 x half> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half> %val, ptr %a, i32 2, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1f16.p0(<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1f32(<vscale x 1 x float> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float> %val, ptr %a, i32 4, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1f32.p0(<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv1f64(<vscale x 1 x double> %val, ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double> %val, ptr %a, i32 8, <vscale x 1 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv1f64.p0(<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>)

define void @masked_store_nxv2bf16(<vscale x 2 x bfloat> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2bf16.p0(<vscale x 2 x bfloat> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2bf16.p0(<vscale x 2 x bfloat>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2f16(<vscale x 2 x half> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2f16.p0(<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2f32(<vscale x 2 x float> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2f32.p0(<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv2f64(<vscale x 2 x double> %val, ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>)

define void @masked_store_nxv4bf16(<vscale x 4 x bfloat> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4bf16.p0(<vscale x 4 x bfloat> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4bf16.p0(<vscale x 4 x bfloat>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4f16(<vscale x 4 x half> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4f16.p0(<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4f32(<vscale x 4 x float> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv4f64(<vscale x 4 x double> %val, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %val, ptr %a, i32 8, <vscale x 4 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double>, ptr, i32, <vscale x 4 x i1>)

define void @masked_store_nxv8bf16(<vscale x 8 x bfloat> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8f16(<vscale x 8 x half> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8f32(<vscale x 8 x float> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float> %val, ptr %a, i32 4, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8f32.p0(<vscale x 8 x float>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv8f64(<vscale x 8 x double> %val, ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vse64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double> %val, ptr %a, i32 8, <vscale x 8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv8f64.p0(<vscale x 8 x double>, ptr, i32, <vscale x 8 x i1>)

define void @masked_store_nxv16bf16(<vscale x 16 x bfloat> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16bf16.p0(<vscale x 16 x bfloat> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16bf16.p0(<vscale x 16 x bfloat>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16f16(<vscale x 16 x half> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half> %val, ptr %a, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16f16.p0(<vscale x 16 x half>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv16f32(<vscale x 16 x float> %val, ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vse32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float> %val, ptr %a, i32 4, <vscale x 16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float>, ptr, i32, <vscale x 16 x i1>)

define void @masked_store_nxv32bf16(<vscale x 32 x bfloat> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32bf16.p0(<vscale x 32 x bfloat> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32bf16.p0(<vscale x 32 x bfloat>, ptr, i32, <vscale x 32 x i1>)

define void @masked_store_nxv32f16(<vscale x 32 x half> %val, ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vse16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half> %val, ptr %a, i32 2, <vscale x 32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.nxv32f16.p0(<vscale x 32 x half>, ptr, i32, <vscale x 32 x i1>)