; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

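; SADDV: predicated signed add reduction. The result is widened to i64 for
; every element size, so it lands in d0 and is moved to x0 with fmov. SADDV
; has no 64-bit element form, so the i64 case selects UADDV instead; the two
; are equivalent there because no widening takes place.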
define i64 @saddv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: saddv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.b
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg,
                                                  <vscale x 16 x i8> %a)
  ret i64 %out
}

define i64 @saddv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: saddv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.h
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i64 %out
}

define i64 @saddv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: saddv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i64 %out
}

define i64 @saddv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: saddv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

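; UADDV: predicated unsigned add reduction, likewise accumulating into a
; 64-bit scalar for all element sizes.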
define i64 @uaddv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: uaddv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.b
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1> %pg,
                                                  <vscale x 16 x i8> %a)
  ret i64 %out
}

define i64 @uaddv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: uaddv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.h
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i64 %out
}

define i64 @uaddv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: uaddv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i64 %out
}

define i64 @uaddv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: uaddv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

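; SMAXV: predicated signed max reduction. Unlike the add reductions, the
; result keeps the element width (b0/h0/s0/d0); sub-32-bit results are still
; read out through fmov w0, s0.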
define i8 @smaxv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: smaxv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1> %pg,
                                                 <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @smaxv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: smaxv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @smaxv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: smaxv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @smaxv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: smaxv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

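; UMAXV: predicated unsigned max reduction.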
define i8 @umaxv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: umaxv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg,
                                                 <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @umaxv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: umaxv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @umaxv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: umaxv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @umaxv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: umaxv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

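; SMINV: predicated signed min reduction.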
define i8 @sminv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: sminv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1> %pg,
                                                 <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @sminv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: sminv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @sminv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: sminv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @sminv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: sminv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

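; UMINV: predicated unsigned min reduction.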
define i8 @uminv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: uminv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1> %pg,
                                                 <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @uminv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: uminv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1> %pg,
                                                  <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @uminv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: uminv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1> %pg,
                                                  <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @uminv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: uminv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg,
                                                  <vscale x 2 x i64> %a)
  ret i64 %out
}

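; ORV: predicated bitwise OR reduction.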
define i8 @orv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: orv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> %pg,
                                               <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @orv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: orv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> %pg,
                                                <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @orv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: orv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg,
                                                <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @orv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: orv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> %pg,
                                                <vscale x 2 x i64> %a)
  ret i64 %out
}

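; EORV: predicated bitwise XOR reduction.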
define i8 @eorv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: eorv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> %pg,
                                                <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @eorv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: eorv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg,
                                                 <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @eorv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: eorv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> %pg,
                                                 <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @eorv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: eorv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> %pg,
                                                 <vscale x 2 x i64> %a)
  ret i64 %out
}

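; ANDV: predicated bitwise AND reduction.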
define i8 @andv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: andv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg,
                                                <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @andv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: andv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> %pg,
                                                 <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @andv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: andv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> %pg,
                                                 <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @andv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: andv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> %pg,
                                                 <vscale x 2 x i64> %a)
  ret i64 %out
}

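; Intrinsic declarations. The add reductions always return i64; the other
; reductions return the source element type.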
declare i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)