; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
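;
; These tests check that zeroing the inactive lanes of the shift's first
; operand (a select against zeroinitializer) is merged into the lowered
; sequence: with +use-experimental-zeroing-pseudos the same-element-size
; shifts are prefixed with a zeroing movprfx, while the wide-element
; variants materialize the zero with an explicit mov/sel before the
; merging shift.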

;
; ASR
;

define <vscale x 16 x i8> @asr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: asr_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: asr_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: asr_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @asr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; ASRD
;

define <vscale x 16 x i8> @asrd_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asrd_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #1
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a_z,
                                                                i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asrd_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asrd_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #2
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a_z,
                                                                i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asrd_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asrd_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #31
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a_z,
                                                                i32 31)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asrd_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asrd_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a_z,
                                                                i32 64)
  ret <vscale x 2 x i64> %out
}

;
; LSL
;

define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsl_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsl_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsl_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @lsl_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; LSR
;

define <vscale x 16 x i8> @lsr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsr_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsr_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsr_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @lsr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)