; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

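;
; ADD
;
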
define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8:
; CHECK: add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16:
; CHECK: add z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK: add z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64:
; CHECK: add z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

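;
; SUB
;
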
define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8:
; CHECK: sub z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16:
; CHECK: sub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32:
; CHECK: sub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64:
; CHECK: sub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

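;
; SUBR
;
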
define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subr_i8:
; CHECK: subr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subr_i16:
; CHECK: subr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subr_i32:
; CHECK: subr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subr_i64:
; CHECK: subr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

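;
; SMAX
;
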
define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smax_i8:
; CHECK: smax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smax_i16:
; CHECK: smax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK: smax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smax_i64:
; CHECK: smax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

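;
; UMAX
;
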
define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umax_i8:
; CHECK: umax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umax_i16:
; CHECK: umax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK: umax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umax_i64:
; CHECK: umax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

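;
; SMIN
;
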
define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smin_i8:
; CHECK: smin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smin_i16:
; CHECK: smin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK: smin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smin_i64:
; CHECK: smin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

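;
; UMIN
;
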
define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umin_i8:
; CHECK: umin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umin_i16:
; CHECK: umin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK: umin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umin_i64:
; CHECK: umin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

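;
; SABD
;
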
define <vscale x 16 x i8> @sabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sabd_i8:
; CHECK: sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sabd_i16:
; CHECK: sabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sabd_i32:
; CHECK: sabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sabd_i64:
; CHECK: sabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

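;
; UABD
;
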
define <vscale x 16 x i8> @uabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uabd_i8:
; CHECK: uabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uabd_i16:
; CHECK: uabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uabd_i32:
; CHECK: uabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uabd_i64:
; CHECK: uabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x  i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)

declare <vscale x 16 x  i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)