; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1B, ST1W, ST1H, ST1D: vector base + immediate offset
;   e.g. st1h { z0.s }, p0, [z1.s, #16]
;

; ST1B
define void @sst1b_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1b_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 16)
  ret void
}

define void @sst1b_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1b_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1b { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                       <vscale x 2 x i1> %pg,
                                                                       <vscale x 2 x i64> %base,
                                                                       i64 16)
  ret void
}

; ST1H
define void @sst1h_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1h_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

define void @sst1h_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1h_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

; ST1W
define void @sst1w_s_imm_offset(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

define void @sst1w_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

define void @sst1w_s_imm_offset_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.s }, p0, [z1.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 16)
  ret void
}

; ST1D
define void @sst1d_d_imm_offset(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

define void @sst1d_d_imm_offset_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 16)
  ret void
}

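; Note: the vector-plus-immediate form encodes the offset as an unsigned 5-bit
; value scaled by the memory element size, so the largest encodable offsets are
; #31 (st1b), #62 (st1h), #124 (st1w) and #248 (st1d). The function below is a
; hand-written boundary-value sketch (not produced by update_llc_test_checks.py);
; its expected code is stated as an assumption, not as a FileCheck assertion.
define void @sst1b_s_imm_offset_upper_bound_sketch(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; Expected (assumed): st1b { z0.s }, p0, [z1.s, #31]
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 31)
  ret void
}
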
;
; ST1B, ST1W, ST1H, ST1D: vector base + out of range immediate offset
;   e.g. st1h { z0.s }, p0, [x8, z1.s, uxtw] (offset materialized in x8)
;

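; When the requested offset cannot be encoded (more than 31 element-sized units,
; or not a multiple of the element size), the offset is expected to be moved into
; a scalar register and the scalar-base + vector-offset form selected instead.
; The function below is a hand-written sketch of the unaligned case (not produced
; by update_llc_test_checks.py); its expected code is stated as an assumption,
; not as a FileCheck assertion.
define void @sst1h_s_imm_offset_unaligned_sketch(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; Expected (assumed): mov w8, #7
;                     st1h { z0.s }, p0, [x8, z1.s, uxtw]
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 7)
  ret void
}
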
; ST1B
define void @sst1b_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1b_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    st1b { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                       <vscale x 4 x i1> %pg,
                                                                       <vscale x 4 x i32> %base,
                                                                       i64 32)
  ret void
}

define void @sst1b_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1b_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                       <vscale x 2 x i1> %pg,
                                                                       <vscale x 2 x i64> %base,
                                                                       i64 32)
  ret void
}

; ST1H
define void @sst1h_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1h_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 63)
  ret void
}

define void @sst1h_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1h_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    st1h { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 63)
  ret void
}

; ST1W
define void @sst1w_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1w_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 125)
  ret void
}

define void @sst1w_s_imm_offset_float_out_of_range(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: sst1w_s_imm_offset_float_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                        <vscale x 4 x i1> %pg,
                                                                        <vscale x 4 x i32> %base,
                                                                        i64 125)
  ret void
}

; ST1D
define void @sst1d_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

define void @sst1d_d_imm_offset_double_out_of_range(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: sst1d_d_imm_offset_double_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                        <vscale x 2 x i1> %pg,
                                                                        <vscale x 2 x i64> %base,
                                                                        i64 249)
  ret void
}

; ST1B
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1H
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; ST1W
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; ST1D
declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)