; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s

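; All tests below follow the same pattern: the first operand of a
; predicated SVE floating-point intrinsic is zeroed in its inactive
; lanes by a select against zeroinitializer. With
; +use-experimental-zeroing-pseudos enabled, this idiom is expected to
; lower to a zeroing MOVPRFX feeding the merging form of the
; instruction, e.g. for fadd_h_zero:
;
;   movprfx z0.h, p0/z, z0.h
;   fadd z0.h, p0/m, z0.h, z1.h
;
; rather than needing a separate unpredicated move to zero the
; destination.
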
;
; FADD
;

define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNM
;
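; Note: unlike fmax/fmin, the "nm" variants follow the IEEE 754
; maxNum/minNum semantics: if exactly one operand is a quiet NaN it is
; ignored and the other operand is returned.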

define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUBR
;
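; Note: fsubr subtracts with reversed operands, so in the active lanes
; the result is %b - %a_z; the zeroed operand is still the one tied to
; the destination register, which is what MOVPRFX requires.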

define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a_z,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a_z,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a_z,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)