; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

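; The tests below exercise llvm.masked.load on scalable integer vectors from
; nxv1i8 (LMUL=1/8) up to nxv64i8 (LMUL=8). As the CHECK lines show, each
; load is expected to select a VLMAX vsetvli whose SEW/LMUL match the element
; type, followed by a masked unit-stride load (vleN.v ..., v0.t).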
define <vscale x 1 x i8> @masked_load_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
  ret <vscale x 1 x i8> %load
}
declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)

define <vscale x 1 x i16> @masked_load_nxv1i16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
  ret <vscale x 1 x i16> %load
}
declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)

define <vscale x 1 x i32> @masked_load_nxv1i32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
  ret <vscale x 1 x i32> %load
}
declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)

define <vscale x 1 x i64> @masked_load_nxv1i64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
  ret <vscale x 1 x i64> %load
}
declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)

define <vscale x 2 x i8> @masked_load_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %load
}
declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)

define <vscale x 2 x i16> @masked_load_nxv2i16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %load
}
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)

define <vscale x 2 x i32> @masked_load_nxv2i32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  ret <vscale x 2 x i32> %load
}
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)

define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %load
}
declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)

define <vscale x 4 x i8> @masked_load_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)

define <vscale x 4 x i16> @masked_load_nxv4i16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  ret <vscale x 4 x i16> %load
}
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)

define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %load
}
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

define <vscale x 4 x i64> @masked_load_nxv4i64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
  ret <vscale x 4 x i64> %load
}
declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)

define <vscale x 8 x i8> @masked_load_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  ret <vscale x 8 x i8> %load
}
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)

define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  ret <vscale x 8 x i16> %load
}
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)

define <vscale x 8 x i32> @masked_load_nxv8i32(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %load
}
declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)

define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %load
}
declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)

define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  ret <vscale x 16 x i8> %load
}
declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)

define <vscale x 16 x i16> @masked_load_nxv16i16(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
  ret <vscale x 16 x i16> %load
}
declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)

define <vscale x 16 x i32> @masked_load_nxv16i32(ptr %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
  ret <vscale x 16 x i32> %load
}
declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)

define <vscale x 32 x i8> @masked_load_nxv32i8(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
  ret <vscale x 32 x i8> %load
}
declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)

define <vscale x 32 x i16> @masked_load_nxv32i16(ptr %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %load
}
declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)

define <vscale x 64 x i8> @masked_load_nxv64i8(ptr %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
  ret <vscale x 64 x i8> %load
}
declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)

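; A mask that is known to be all-zero makes the masked load a no-op: the
; passthru value (undef here) is returned directly, so no load is emitted.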
define <vscale x 2 x i8> @masked_load_zero_mask(ptr %a) nounwind {
; CHECK-LABEL: masked_load_zero_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %load
}

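; A mask that is known to be all-ones lowers to an ordinary unmasked vle8.v,
; and the passthru operand is ignored.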
define <vscale x 2 x i8> @masked_load_allones_mask(ptr %a, <vscale x 2 x i8> %maskedoff) nounwind {
; CHECK-LABEL: masked_load_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1), <vscale x 2 x i8> %maskedoff)
  ret <vscale x 2 x i8> %load
}