; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -disable-strictnode-mutation %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI

; Check that constrained fp vector intrinsics are correctly lowered.
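; Both RUN lines pass -disable-strictnode-mutation so the strict FP nodes reach
; instruction selection instead of being relaxed to their non-strict forms, and
; the second RUN line uses -global-isel-abort=2 so a GlobalISel selection failure
; produces a fallback warning (checked below) rather than an error. Attribute #0
; (strictfp) marks every test function as using strict floating-point semantics.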

; CHECK-GI:       warning: Instruction selection used fallback path for add_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v4i32_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v4i32_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v4i64_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v4i64_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v4f32_v4i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v4f32_v4i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v4f32_v4i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v4f32_v4i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v4f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v2i32_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v2i32_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v2i64_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v2i64_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v2f64_v2i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v2f64_v2i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v2f64_v2i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v2f64_v2i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v1i32_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v1i32_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_v1i64_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_v1i64_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v1f64_v1i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v1f64_v1i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_v1f64_v1i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_v1f64_v1i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_v2f32_v2f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_v2f64_v2f32
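; Every test function appears in the fallback list above: at this revision GlobalISel
; cannot select these constrained vector operations, so each one is lowered through
; the SelectionDAG fallback path instead.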

; Single-precision intrinsics

define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: add_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: sub_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: mul_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: div_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
; CHECK-LABEL: fma_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla v2.4s, v1.4s, v0.4s
; CHECK-NEXT:    mov v0.16b, v2.16b
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptosi_v4i32_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i32> %val
}

define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptoui_v4i32_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i32> %val
}

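; There is no single instruction converting between v4f32 and v4i64, so these
; conversions are split into two v2f64 halves: fcvtl/fcvtl2 widen to double before
; fcvtzs/fcvtzu, and fcvtn/fcvtn2 narrow the scvtf/ucvtf results back to v4f32.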
define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptosi_v4i64_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl2 v1.2d, v0.4s
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i64> %val
}

define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptoui_v4i64_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl2 v1.2d, v0.4s
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    fcvtzu v1.2d, v1.2d
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i64> %val
}

define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v4f32_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v4f32_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v4f32_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    scvtf v1.2d, v1.2d
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v4f32_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ucvtf v1.2d, v1.2d
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: sqrt_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: rint_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: nearbyint_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: maxnum_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: minnum_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: ceil_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: floor_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @round_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: round_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: roundeven_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: trunc_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

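; The strict vector compares are scalarized: each lane is compared with fcmp (or
; fcmpe for the signaling fcmps variants), the flags are materialized with csetm,
; and the lanes are reassembled into a vector of i1 results.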
define <4 x i1> @fcmp_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: fcmp_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov s2, v1.s[1]
; CHECK-NEXT:    mov s3, v0.s[1]
; CHECK-NEXT:    fcmp s0, s1
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmp s3, s2
; CHECK-NEXT:    mov s2, v1.s[2]
; CHECK-NEXT:    mov s3, v0.s[2]
; CHECK-NEXT:    fmov s4, w8
; CHECK-NEXT:    mov s1, v1.s[3]
; CHECK-NEXT:    mov s0, v0.s[3]
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[1], w8
; CHECK-NEXT:    fcmp s3, s2
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmp s0, s1
; CHECK-NEXT:    mov v4.s[2], w8
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[3], w8
; CHECK-NEXT:    xtn v0.4h, v4.4s
; CHECK-NEXT:    ret
entry:
  %val = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <4 x i1> %val
}

define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: fcmps_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov s2, v1.s[1]
; CHECK-NEXT:    mov s3, v0.s[1]
; CHECK-NEXT:    fcmpe s0, s1
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmpe s3, s2
; CHECK-NEXT:    mov s2, v1.s[2]
; CHECK-NEXT:    mov s3, v0.s[2]
; CHECK-NEXT:    fmov s4, w8
; CHECK-NEXT:    mov s1, v1.s[3]
; CHECK-NEXT:    mov s0, v0.s[3]
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[1], w8
; CHECK-NEXT:    fcmpe s3, s2
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmpe s0, s1
; CHECK-NEXT:    mov v4.s[2], w8
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[3], w8
; CHECK-NEXT:    xtn v0.4h, v4.4s
; CHECK-NEXT:    ret
entry:
  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <4 x i1> %val
}


; Double-precision intrinsics
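; These mirror the single-precision tests above using the .2d forms; the
; v2i32 <-> v2f64 conversions additionally widen with sshll/ushll or narrow
; with xtn around the element conversion.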

define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: add_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: sub_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: mul_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: div_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
; CHECK-LABEL: fma_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla v2.2d, v1.2d, v0.2d
; CHECK-NEXT:    mov v0.16b, v2.16b
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptosi_v2i32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i32> %val
}

define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptoui_v2i32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i32> %val
}

define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptosi_v2i64_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i64> %val
}

define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptoui_v2i64_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i64> %val
}

define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v2f64_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v2f64_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v2f64_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @uitofp_v2f64_v2i64(<2 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v2f64_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: sqrt_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: rint_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: nearbyint_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: maxnum_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: minnum_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: ceil_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: floor_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @round_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: round_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: roundeven_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: trunc_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x i1> @fcmp_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: fcmp_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov d2, v1.d[1]
; CHECK-NEXT:    mov d3, v0.d[1]
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    fcmp d3, d2
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    mov v0.d[1], x8
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
entry:
  %val = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <2 x i1> %val
}

define <2 x i1> @fcmps_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: fcmps_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov d2, v1.d[1]
; CHECK-NEXT:    mov d3, v0.d[1]
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    fcmpe d3, d2
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    mov v0.d[1], x8
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
entry:
  %val = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <2 x i1> %val
}


; Double-precision single element intrinsics
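; The v1f64 operations are lowered to the corresponding scalar double-precision
; instructions on the d registers.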

define <1 x double> @add_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: add_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sub_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: sub_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @mul_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: mul_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @div_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: div_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z) #0 {
; CHECK-LABEL: fma_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmadd d0, d0, d1, d2
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptosi_v1i32_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs w8, d0
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    ret
  %val = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i32> %val
}

define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptoui_v1i32_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu w8, d0
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    ret
  %val = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i32> %val
}

define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptosi_v1i64_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs x8, d0
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i64> %val
}

define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptoui_v1i64_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu x8, d0
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i64> %val
}

define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v1f64_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    scvtf d0, w8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v1f64_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    ucvtf d0, w8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v1f64_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    scvtf d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @uitofp_v1f64_v1i64(<1 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v1f64_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    ucvtf d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: sqrt_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: rint_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: nearbyint_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: maxnum_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: minnum_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: ceil_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: floor_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @round_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: round_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: roundeven_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: trunc_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x i1> @fcmp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: fcmp_v1f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
entry:
  %val = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <1 x i1> %val
}

define <1 x i1> @fcmps_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: fcmps_v1f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
entry:
  %val = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <1 x i1> %val
}


; Intrinsics to convert between floating-point types
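; fptrunc and fpext on two-element vectors map directly to fcvtn and fcvtl.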

define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptrunc_v2f32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x float> %val
}

define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
; CHECK-LABEL: fpext_v2f64_v2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}


attributes #0 = { strictfp }

declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)

declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)

declare <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double>, <1 x double>, <1 x double>, metadata, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double>, <1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double>, <1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)

declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-GI: {{.*}}
; CHECK-SD: {{.*}}