; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s

;
; Masked Loads
;

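; A masked load whose only use is a zero-extend should combine into a single
; unsigned extending load (ld1b/ld1h/ld1w into the wider element type), with
; no separate extend instruction.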
define <vscale x 2 x i64> @masked_zload_nxv2i8(ptr %src, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_zload_nxv2i16(ptr %src, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_zload_nxv2i32(ptr %src, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 4 x i32> @masked_zload_nxv4i8(ptr %src, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}

define <vscale x 4 x i32> @masked_zload_nxv4i16(ptr %src, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  %ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}

define <vscale x 8 x i16> @masked_zload_nxv8i8(ptr %src, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ext
}

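; With a non-undef passthru the load cannot simply extend: the passthru is
; zero-extended separately and merged back in under the mask.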
define <vscale x 2 x i64> @masked_zload_passthru(ptr %src, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
; CHECK-LABEL: masked_zload_passthru:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
  %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

; Return type requires splitting
define <vscale x 8 x i64> @masked_zload_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_zload_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z1.s, z0.h
; CHECK-NEXT:    uunpkhi z3.s, z0.h
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    uunpklo z2.d, z3.s
; CHECK-NEXT:    uunpkhi z3.d, z3.s
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %ext
}

; Masked load requires promotion
define <vscale x 2 x double> @masked_zload_2i16_2f64(ptr noalias %in, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_zload_2i16_2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %zext = zext <vscale x 2 x i16> %wide.load to <vscale x 2 x i32>
  %res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
  ret <vscale x 2 x double> %res
}

; Extending loads from unpacked to wide illegal types

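; Here the expected lowering is a single unpacked extending load whose result
; is then widened with uunpklo/uunpkhi, rather than splitting the load itself.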
define <vscale x 4 x i64> @masked_zload_4i8_4i64(ptr %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: masked_zload_4i8_4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i8> zeroinitializer)
  %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %aext
}

define <vscale x 4 x i64> @masked_zload_4i16_4i64(ptr %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: masked_zload_4i16_4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i16> zeroinitializer)
  %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %aext
}

define <vscale x 8 x i32> @masked_zload_8i8_8i32(ptr %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: masked_zload_8i8_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.s, z1.h
; CHECK-NEXT:    uunpkhi z1.s, z1.h
; CHECK-NEXT:    ret
  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %aext
}

define <vscale x 8 x i64> @masked_zload_8i8_8i64(ptr %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: masked_zload_8i8_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z1.s, z0.h
; CHECK-NEXT:    uunpkhi z3.s, z0.h
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    uunpklo z2.d, z3.s
; CHECK-NEXT:    uunpkhi z3.d, z3.s
; CHECK-NEXT:    ret
  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %aext
}

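; For these variants the predicate is split with punpklo/punpkhi and each
; half is loaded directly at the wide element size, so no unpacks of the
; loaded data are needed.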
define <vscale x 4 x i64> @masked_zload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_4i8_4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1b { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1b { z2.d }, p1/z, [x1, #1, mul vl]
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z3.d }, p0/z, [x1]
; CHECK-NEXT:    add z1.d, z1.d, z2.d
; CHECK-NEXT:    add z0.d, z0.d, z3.d
; CHECK-NEXT:    ret
  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
  %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
  %bext = zext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
  %res = add <vscale x 4 x i64> %aext, %bext
  ret <vscale x 4 x i64> %res
}

define <vscale x 4 x i64> @masked_zload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_4i16_4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1h { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.d }, p1/z, [x1, #1, mul vl]
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x1]
; CHECK-NEXT:    add z1.d, z1.d, z2.d
; CHECK-NEXT:    add z0.d, z0.d, z3.d
; CHECK-NEXT:    ret
  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
  %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
  %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
  %bext = zext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
  %res = add <vscale x 4 x i64> %aext, %bext
  ret <vscale x 4 x i64> %res
}

define <vscale x 8 x i32> @masked_zload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_8i8_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1b { z1.s }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1b { z2.s }, p1/z, [x1, #1, mul vl]
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x1]
; CHECK-NEXT:    add z1.s, z1.s, z2.s
; CHECK-NEXT:    add z0.s, z0.s, z3.s
; CHECK-NEXT:    ret
  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
  %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
  %res = add <vscale x 8 x i32> %aext, %bext
  ret <vscale x 8 x i32> %res
}

define <vscale x 8 x i64> @masked_zload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_8i8_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpkhi p1.h, p0.b
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    punpkhi p2.h, p1.b
; CHECK-NEXT:    punpklo p1.h, p1.b
; CHECK-NEXT:    punpkhi p3.h, p0.b
; CHECK-NEXT:    ld1b { z3.d }, p2/z, [x0, #3, mul vl]
; CHECK-NEXT:    ld1b { z5.d }, p2/z, [x1, #3, mul vl]
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ld1b { z2.d }, p1/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1b { z6.d }, p1/z, [x1, #2, mul vl]
; CHECK-NEXT:    ld1b { z1.d }, p3/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1b { z7.d }, p3/z, [x1, #1, mul vl]
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z4.d }, p0/z, [x1]
; CHECK-NEXT:    add z3.d, z3.d, z5.d
; CHECK-NEXT:    add z2.d, z2.d, z6.d
; CHECK-NEXT:    add z1.d, z1.d, z7.d
; CHECK-NEXT:    add z0.d, z0.d, z4.d
; CHECK-NEXT:    ret
  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
  %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
  %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
  %res = add <vscale x 8 x i64> %aext, %bext
  ret <vscale x 8 x i64> %res
}


declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)