; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
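;
; llvm.masked.load on fixed-length integer vectors lowers to a masked
; unit-stride load (vle{8,16,32,64}.v with the v0.t mask) under a single
; vsetivli/vsetvli that picks SEW and LMUL for the element type and count.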

define <1 x i8> @masked_load_v1i8(ptr %a, <1 x i1> %mask) {
; CHECK-LABEL: masked_load_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <1 x i8> @llvm.masked.load.v1i8(ptr %a, i32 8, <1 x i1> %mask, <1 x i8> undef)
  ret <1 x i8> %load
}

define <1 x i16> @masked_load_v1i16(ptr %a, <1 x i1> %mask) {
; CHECK-LABEL: masked_load_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <1 x i16> @llvm.masked.load.v1i16(ptr %a, i32 8, <1 x i1> %mask, <1 x i16> undef)
  ret <1 x i16> %load
}

define <1 x i32> @masked_load_v1i32(ptr %a, <1 x i1> %mask) {
; CHECK-LABEL: masked_load_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <1 x i32> @llvm.masked.load.v1i32(ptr %a, i32 8, <1 x i1> %mask, <1 x i32> undef)
  ret <1 x i32> %load
}

define <1 x i64> @masked_load_v1i64(ptr %a, <1 x i1> %mask) {
; CHECK-LABEL: masked_load_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <1 x i64> @llvm.masked.load.v1i64(ptr %a, i32 8, <1 x i1> %mask, <1 x i64> undef)
  ret <1 x i64> %load
}

define <2 x i8> @masked_load_v2i8(ptr %a, <2 x i1> %mask) {
; CHECK-LABEL: masked_load_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <2 x i8> @llvm.masked.load.v2i8(ptr %a, i32 8, <2 x i1> %mask, <2 x i8> undef)
  ret <2 x i8> %load
}

define <2 x i16> @masked_load_v2i16(ptr %a, <2 x i1> %mask) {
; CHECK-LABEL: masked_load_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <2 x i16> @llvm.masked.load.v2i16(ptr %a, i32 8, <2 x i1> %mask, <2 x i16> undef)
  ret <2 x i16> %load
}

define <2 x i32> @masked_load_v2i32(ptr %a, <2 x i1> %mask) {
; CHECK-LABEL: masked_load_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <2 x i32> @llvm.masked.load.v2i32(ptr %a, i32 8, <2 x i1> %mask, <2 x i32> undef)
  ret <2 x i32> %load
}

define <2 x i64> @masked_load_v2i64(ptr %a, <2 x i1> %mask) {
; CHECK-LABEL: masked_load_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <2 x i64> @llvm.masked.load.v2i64(ptr %a, i32 8, <2 x i1> %mask, <2 x i64> undef)
  ret <2 x i64> %load
}

define <4 x i8> @masked_load_v4i8(ptr %a, <4 x i1> %mask) {
; CHECK-LABEL: masked_load_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <4 x i8> @llvm.masked.load.v4i8(ptr %a, i32 8, <4 x i1> %mask, <4 x i8> undef)
  ret <4 x i8> %load
}

define <4 x i16> @masked_load_v4i16(ptr %a, <4 x i1> %mask) {
; CHECK-LABEL: masked_load_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <4 x i16> @llvm.masked.load.v4i16(ptr %a, i32 8, <4 x i1> %mask, <4 x i16> undef)
  ret <4 x i16> %load
}

define <4 x i32> @masked_load_v4i32(ptr %a, <4 x i1> %mask) {
; CHECK-LABEL: masked_load_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 8, <4 x i1> %mask, <4 x i32> undef)
  ret <4 x i32> %load
}

define <4 x i64> @masked_load_v4i64(ptr %a, <4 x i1> %mask) {
; CHECK-LABEL: masked_load_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <4 x i64> @llvm.masked.load.v4i64(ptr %a, i32 8, <4 x i1> %mask, <4 x i64> undef)
  ret <4 x i64> %load
}

define <8 x i8> @masked_load_v8i8(ptr %a, <8 x i1> %mask) {
; CHECK-LABEL: masked_load_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %a, i32 8, <8 x i1> %mask, <8 x i8> undef)
  ret <8 x i8> %load
}

define <8 x i16> @masked_load_v8i16(ptr %a, <8 x i1> %mask) {
; CHECK-LABEL: masked_load_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <8 x i16> @llvm.masked.load.v8i16(ptr %a, i32 8, <8 x i1> %mask, <8 x i16> undef)
  ret <8 x i16> %load
}

define <8 x i32> @masked_load_v8i32(ptr %a, <8 x i1> %mask) {
; CHECK-LABEL: masked_load_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <8 x i32> @llvm.masked.load.v8i32(ptr %a, i32 8, <8 x i1> %mask, <8 x i32> undef)
  ret <8 x i32> %load
}

define <8 x i64> @masked_load_v8i64(ptr %a, <8 x i1> %mask) {
; CHECK-LABEL: masked_load_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <8 x i64> @llvm.masked.load.v8i64(ptr %a, i32 8, <8 x i1> %mask, <8 x i64> undef)
  ret <8 x i64> %load
}

define <16 x i8> @masked_load_v16i8(ptr %a, <16 x i1> %mask) {
; CHECK-LABEL: masked_load_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %a, i32 8, <16 x i1> %mask, <16 x i8> undef)
  ret <16 x i8> %load
}

define <16 x i16> @masked_load_v16i16(ptr %a, <16 x i1> %mask) {
; CHECK-LABEL: masked_load_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <16 x i16> @llvm.masked.load.v16i16(ptr %a, i32 8, <16 x i1> %mask, <16 x i16> undef)
  ret <16 x i16> %load
}

define <16 x i32> @masked_load_v16i32(ptr %a, <16 x i1> %mask) {
; CHECK-LABEL: masked_load_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <16 x i32> @llvm.masked.load.v16i32(ptr %a, i32 8, <16 x i1> %mask, <16 x i32> undef)
  ret <16 x i32> %load
}

define <16 x i64> @masked_load_v16i64(ptr %a, <16 x i1> %mask) {
; CHECK-LABEL: masked_load_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <16 x i64> @llvm.masked.load.v16i64(ptr %a, i32 8, <16 x i1> %mask, <16 x i64> undef)
  ret <16 x i64> %load
}

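; From here on the AVL is 32 or more, which does not fit in vsetivli's 5-bit
; unsigned immediate (max 31), so it is materialized with li and consumed by
; the register form, vsetvli.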
define <32 x i8> @masked_load_v32i8(ptr %a, <32 x i1> %mask) {
; CHECK-LABEL: masked_load_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %a, i32 8, <32 x i1> %mask, <32 x i8> undef)
  ret <32 x i8> %load
}

define <32 x i16> @masked_load_v32i16(ptr %a, <32 x i1> %mask) {
; CHECK-LABEL: masked_load_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <32 x i16> @llvm.masked.load.v32i16(ptr %a, i32 8, <32 x i1> %mask, <32 x i16> undef)
  ret <32 x i16> %load
}

define <32 x i32> @masked_load_v32i32(ptr %a, <32 x i1> %mask) {
; CHECK-LABEL: masked_load_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <32 x i32> @llvm.masked.load.v32i32(ptr %a, i32 8, <32 x i1> %mask, <32 x i32> undef)
  ret <32 x i32> %load
}

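; With +v the backend assumes VLEN >= 128, so an m8 register group holds at
; most 1024 bits. Types wider than that, such as the 2048-bit <32 x i64>, are
; split into two m8 halves; the mask for the upper half is produced by sliding
; the original mask in v0 down 2 bytes (16 mask bits) before the second load.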
define <32 x i64> @masked_load_v32i64(ptr %a, <32 x i1> %mask) {
; CHECK-LABEL: masked_load_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v0, 2
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <32 x i64> @llvm.masked.load.v32i64(ptr %a, i32 8, <32 x i1> %mask, <32 x i64> undef)
  ret <32 x i64> %load
}

define <64 x i8> @masked_load_v64i8(ptr %a, <64 x i1> %mask) {
; CHECK-LABEL: masked_load_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <64 x i8> @llvm.masked.load.v64i8(ptr %a, i32 8, <64 x i1> %mask, <64 x i8> undef)
  ret <64 x i8> %load
}

define <64 x i16> @masked_load_v64i16(ptr %a, <64 x i1> %mask) {
; CHECK-LABEL: masked_load_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <64 x i16> @llvm.masked.load.v64i16(ptr %a, i32 8, <64 x i1> %mask, <64 x i16> undef)
  ret <64 x i16> %load
}

define <64 x i32> @masked_load_v64i32(ptr %a, <64 x i1> %mask) {
; CHECK-LABEL: masked_load_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v16, v0, 4
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vle32.v v16, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <64 x i32> @llvm.masked.load.v64i32(ptr %a, i32 8, <64 x i1> %mask, <64 x i32> undef)
  ret <64 x i32> %load
}

define <128 x i8> @masked_load_v128i8(ptr %a, <128 x i1> %mask) {
; CHECK-LABEL: masked_load_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <128 x i8> @llvm.masked.load.v128i8(ptr %a, i32 8, <128 x i1> %mask, <128 x i8> undef)
  ret <128 x i8> %load
}

define <128 x i16> @masked_load_v128i16(ptr %a, <128 x i1> %mask) {
; CHECK-LABEL: masked_load_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v16, v0, 8
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vle16.v v16, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <128 x i16> @llvm.masked.load.v128i16(ptr %a, i32 8, <128 x i1> %mask, <128 x i16> undef)
  ret <128 x i16> %load
}

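; A <256 x i1> mask argument no longer fits in one mask register, so its
; second half arrives in v8 rather than being slid out of v0; it is copied to
; v16 up front because v8 is about to be overwritten by the first result half.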
define <256 x i8> @masked_load_v256i8(ptr %a, <256 x i1> %mask) {
; CHECK-LABEL: masked_load_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    li a1, 128
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vle8.v v16, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <256 x i8> @llvm.masked.load.v256i8(ptr %a, i32 8, <256 x i1> %mask, <256 x i8> undef)
  ret <256 x i8> %load
}