xref: /llvm-project/llvm/test/CodeGen/AArch64/fp-intrinsics.ll (revision 28064bfad12cfce959d74fa6d099312e19703f26)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI

; Check that constrained fp intrinsics are correctly lowered.

; CHECK-GI:       warning: Instruction selection used fallback path for add_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f32_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f32_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maximum_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minimum_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f64_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f64_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maximum_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minimum_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for roundeven_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for add_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sub_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for mul_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for div_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for frem_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fma_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i32_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i32_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptosi_i64_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptoui_i64_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sitofp_f128_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uitofp_f128_i128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqrt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for powi_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for rint_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for nearbyint_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lrint_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llrint_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for maxnum_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for minnum_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ceil_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for floor_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for lround_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for llround_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for round_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for trunc_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_olt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ole_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ogt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oge_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_oeq_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_one_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ult_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ule_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ugt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_uge_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_ueq_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmp_une_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_olt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ole_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ogt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oge_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_oeq_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_one_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ult_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ule_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ugt_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_uge_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_ueq_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fcmps_une_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f32_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f32_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fptrunc_f64_f128
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f64_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f128_f32
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fpext_f128_f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sin_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cos_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tan_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for asin_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for acos_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for atan2_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sinh_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for cosh_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for tanh_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pow_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log2_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for log10_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp_v1f64
; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for exp2_v1f64


; Single-precision intrinsics

; Strict fadd (round.tonearest, fpexcept.strict) still selects the plain fadd
; instruction; the constrained semantics only constrain scheduling/folding.
define float @add_f32(float %x, float %y) #0 {
; CHECK-LABEL: add_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd s0, s0, s1
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict fsub selects fsub.
define float @sub_f32(float %x, float %y) #0 {
; CHECK-LABEL: sub_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub s0, s0, s1
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict fmul selects fmul.
define float @mul_f32(float %x, float %y) #0 {
; CHECK-LABEL: mul_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul s0, s0, s1
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict fdiv selects fdiv.
define float @div_f32(float %x, float %y) #0 {
; CHECK-LABEL: div_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv s0, s0, s1
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict frem has no AArch64 instruction; it is lowered to a libcall to fmodf.
define float @frem_f32(float %x, float %y) #0 {
; CHECK-LABEL: frem_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl fmodf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict fma selects the fused fmadd instruction.
define float @fma_f32(float %x, float %y, float %z) #0 {
; CHECK-LABEL: fma_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmadd s0, s0, s1, s2
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict fptosi f32->i32 selects fcvtzs. Note the fptosi/fptoui constrained
; intrinsics carry only the fpexcept metadata (no rounding-mode operand).
define i32 @fptosi_i32_f32(float %x) #0 {
; CHECK-LABEL: fptosi_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs w0, s0
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
  ret i32 %val
}

; Strict fptoui f32->i32 selects fcvtzu.
define i32 @fptoui_i32_f32(float %x) #0 {
; CHECK-LABEL: fptoui_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu w0, s0
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
  ret i32 %val
}

; Strict fptosi f32->i64 selects fcvtzs into an x register.
define i64 @fptosi_i64_f32(float %x) #0 {
; CHECK-LABEL: fptosi_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs x0, s0
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %x, metadata !"fpexcept.strict") #0
  ret i64 %val
}

; Strict fptoui f32->i64 selects fcvtzu into an x register.
define i64 @fptoui_i64_f32(float %x) #0 {
; CHECK-LABEL: fptoui_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu x0, s0
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %x, metadata !"fpexcept.strict") #0
  ret i64 %val
}

; Strict sitofp i32->f32 selects scvtf.
define float @sitofp_f32_i32(i32 %x) #0 {
; CHECK-LABEL: sitofp_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf s0, w0
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict uitofp i32->f32 selects ucvtf.
define float @uitofp_f32_i32(i32 %x) #0 {
; CHECK-LABEL: uitofp_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf s0, w0
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict sitofp i64->f32 selects scvtf from an x register.
define float @sitofp_f32_i64(i64 %x) #0 {
; CHECK-LABEL: sitofp_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf s0, x0
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict uitofp i64->f32 selects ucvtf from an x register.
define float @uitofp_f32_i64(i64 %x) #0 {
; CHECK-LABEL: uitofp_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf s0, x0
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict sitofp i128->f32 has no instruction; it is lowered to the
; compiler-rt libcall __floattisf.
define float @sitofp_f32_i128(i128 %x) #0 {
; CHECK-LABEL: sitofp_f32_i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floattisf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict uitofp i128->f32 lowers to the compiler-rt libcall __floatuntisf.
define float @uitofp_f32_i128(i128 %x) #0 {
; CHECK-LABEL: uitofp_f32_i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatuntisf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict sqrt selects the fsqrt instruction.
define float @sqrt_f32(float %x) #0 {
; CHECK-LABEL: sqrt_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt s0, s0
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict powi lowers to the compiler-rt libcall __powisf2.
define float @powi_f32(float %x, i32 %y) #0 {
; CHECK-LABEL: powi_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __powisf2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.powi.f32(float %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; The strict trigonometric intrinsics below all lower to libm libcalls
; (sinf, cosf, tanf, asinf, acosf, atanf, atan2f), each with the same
; minimal x30 spill/reload frame around the call.
define float @sin_f32(float %x) #0 {
; CHECK-LABEL: sin_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict cos -> libcall cosf.
define float @cos_f32(float %x) #0 {
; CHECK-LABEL: cos_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.cos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict tan -> libcall tanf.
define float @tan_f32(float %x) #0 {
; CHECK-LABEL: tan_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict asin -> libcall asinf.
define float @asin_f32(float %x) #0 {
; CHECK-LABEL: asin_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl asinf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.asin.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict acos -> libcall acosf.
define float @acos_f32(float %x) #0 {
; CHECK-LABEL: acos_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl acosf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.acos.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict atan -> libcall atanf.
define float @atan_f32(float %x) #0 {
; CHECK-LABEL: atan_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atanf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.atan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict atan2 (two operands) -> libcall atan2f.
define float @atan2_f32(float %x, float %y) #0 {
; CHECK-LABEL: atan2_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atan2f
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.atan2.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict sinh -> libcall sinhf.
define float @sinh_f32(float %x) #0 {
; CHECK-LABEL: sinh_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sinhf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.sinh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict cosh -> libcall coshf.
define float @cosh_f32(float %x) #0 {
; CHECK-LABEL: cosh_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl coshf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.cosh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict tanh -> libcall tanhf.
define float @tanh_f32(float %x) #0 {
; CHECK-LABEL: tanh_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanhf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.tanh.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

; Strict pow (two operands) -> libcall powf.
define float @pow_f32(float %x, float %y) #0 {
; CHECK-LABEL: pow_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call float @llvm.experimental.constrained.pow.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %val
}

570define float @log_f32(float %x) #0 {
571; CHECK-LABEL: log_f32:
572; CHECK:       // %bb.0:
573; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
574; CHECK-NEXT:    .cfi_def_cfa_offset 16
575; CHECK-NEXT:    .cfi_offset w30, -16
576; CHECK-NEXT:    bl logf
577; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
578; CHECK-NEXT:    ret
579  %val = call float @llvm.experimental.constrained.log.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
580  ret float %val
581}
582
583define float @log10_f32(float %x) #0 {
584; CHECK-LABEL: log10_f32:
585; CHECK:       // %bb.0:
586; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
587; CHECK-NEXT:    .cfi_def_cfa_offset 16
588; CHECK-NEXT:    .cfi_offset w30, -16
589; CHECK-NEXT:    bl log10f
590; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
591; CHECK-NEXT:    ret
592  %val = call float @llvm.experimental.constrained.log10.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
593  ret float %val
594}
595
596define float @log2_f32(float %x) #0 {
597; CHECK-LABEL: log2_f32:
598; CHECK:       // %bb.0:
599; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
600; CHECK-NEXT:    .cfi_def_cfa_offset 16
601; CHECK-NEXT:    .cfi_offset w30, -16
602; CHECK-NEXT:    bl log2f
603; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
604; CHECK-NEXT:    ret
605  %val = call float @llvm.experimental.constrained.log2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
606  ret float %val
607}
608
609define float @exp_f32(float %x) #0 {
610; CHECK-LABEL: exp_f32:
611; CHECK:       // %bb.0:
612; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
613; CHECK-NEXT:    .cfi_def_cfa_offset 16
614; CHECK-NEXT:    .cfi_offset w30, -16
615; CHECK-NEXT:    bl expf
616; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
617; CHECK-NEXT:    ret
618  %val = call float @llvm.experimental.constrained.exp.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
619  ret float %val
620}
621
622define float @exp2_f32(float %x) #0 {
623; CHECK-LABEL: exp2_f32:
624; CHECK:       // %bb.0:
625; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
626; CHECK-NEXT:    .cfi_def_cfa_offset 16
627; CHECK-NEXT:    .cfi_offset w30, -16
628; CHECK-NEXT:    bl exp2f
629; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
630; CHECK-NEXT:    ret
631  %val = call float @llvm.experimental.constrained.exp2.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
632  ret float %val
633}
634
635define float @rint_f32(float %x) #0 {
636; CHECK-LABEL: rint_f32:
637; CHECK:       // %bb.0:
638; CHECK-NEXT:    frintx s0, s0
639; CHECK-NEXT:    ret
640  %val = call float @llvm.experimental.constrained.rint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
641  ret float %val
642}
643
644define float @nearbyint_f32(float %x) #0 {
645; CHECK-LABEL: nearbyint_f32:
646; CHECK:       // %bb.0:
647; CHECK-NEXT:    frinti s0, s0
648; CHECK-NEXT:    ret
649  %val = call float @llvm.experimental.constrained.nearbyint.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
650  ret float %val
651}
652
653define i32 @lrint_f32(float %x) #0 {
654; CHECK-LABEL: lrint_f32:
655; CHECK:       // %bb.0:
656; CHECK-NEXT:    frintx s0, s0
657; CHECK-NEXT:    fcvtzs w0, s0
658; CHECK-NEXT:    ret
659  %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
660  ret i32 %val
661}
662
663define i64 @llrint_f32(float %x) #0 {
664; CHECK-LABEL: llrint_f32:
665; CHECK:       // %bb.0:
666; CHECK-NEXT:    frintx s0, s0
667; CHECK-NEXT:    fcvtzs x0, s0
668; CHECK-NEXT:    ret
669  %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
670  ret i64 %val
671}
672
673define float @maxnum_f32(float %x, float %y) #0 {
674; CHECK-LABEL: maxnum_f32:
675; CHECK:       // %bb.0:
676; CHECK-NEXT:    fmaxnm s0, s0, s1
677; CHECK-NEXT:    ret
678  %val = call float @llvm.experimental.constrained.maxnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
679  ret float %val
680}
681
682define float @minnum_f32(float %x, float %y) #0 {
683; CHECK-LABEL: minnum_f32:
684; CHECK:       // %bb.0:
685; CHECK-NEXT:    fminnm s0, s0, s1
686; CHECK-NEXT:    ret
687  %val = call float @llvm.experimental.constrained.minnum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
688  ret float %val
689}
690
691define float @maximum_f32(float %x, float %y) #0 {
692; CHECK-LABEL: maximum_f32:
693; CHECK:       // %bb.0:
694; CHECK-NEXT:    fmax s0, s0, s1
695; CHECK-NEXT:    ret
696  %val = call float @llvm.experimental.constrained.maximum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
697  ret float %val
698}
699
700define float @minimum_f32(float %x, float %y) #0 {
701; CHECK-LABEL: minimum_f32:
702; CHECK:       // %bb.0:
703; CHECK-NEXT:    fmin s0, s0, s1
704; CHECK-NEXT:    ret
705  %val = call float @llvm.experimental.constrained.minimum.f32(float %x, float %y, metadata !"fpexcept.strict") #0
706  ret float %val
707}
708
709define float @ceil_f32(float %x) #0 {
710; CHECK-LABEL: ceil_f32:
711; CHECK:       // %bb.0:
712; CHECK-NEXT:    frintp s0, s0
713; CHECK-NEXT:    ret
714  %val = call float @llvm.experimental.constrained.ceil.f32(float %x, metadata !"fpexcept.strict") #0
715  ret float %val
716}
717
718define float @floor_f32(float %x) #0 {
719; CHECK-LABEL: floor_f32:
720; CHECK:       // %bb.0:
721; CHECK-NEXT:    frintm s0, s0
722; CHECK-NEXT:    ret
723  %val = call float @llvm.experimental.constrained.floor.f32(float %x, metadata !"fpexcept.strict") #0
724  ret float %val
725}
726
727define i32 @lround_f32(float %x) #0 {
728; CHECK-LABEL: lround_f32:
729; CHECK:       // %bb.0:
730; CHECK-NEXT:    fcvtas w0, s0
731; CHECK-NEXT:    ret
732  %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict") #0
733  ret i32 %val
734}
735
736define i64 @llround_f32(float %x) #0 {
737; CHECK-LABEL: llround_f32:
738; CHECK:       // %bb.0:
739; CHECK-NEXT:    fcvtas x0, s0
740; CHECK-NEXT:    ret
741  %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict") #0
742  ret i64 %val
743}
744
745define float @round_f32(float %x) #0 {
746; CHECK-LABEL: round_f32:
747; CHECK:       // %bb.0:
748; CHECK-NEXT:    frinta s0, s0
749; CHECK-NEXT:    ret
750  %val = call float @llvm.experimental.constrained.round.f32(float %x, metadata !"fpexcept.strict") #0
751  ret float %val
752}
753
754define float @roundeven_f32(float %x) #0 {
755; CHECK-LABEL: roundeven_f32:
756; CHECK:       // %bb.0:
757; CHECK-NEXT:    frintn s0, s0
758; CHECK-NEXT:    ret
759  %val = call float @llvm.experimental.constrained.roundeven.f32(float %x, metadata !"fpexcept.strict") #0
760  ret float %val
761}
762
763define float @trunc_f32(float %x) #0 {
764; CHECK-LABEL: trunc_f32:
765; CHECK:       // %bb.0:
766; CHECK-NEXT:    frintz s0, s0
767; CHECK-NEXT:    ret
768  %val = call float @llvm.experimental.constrained.trunc.f32(float %x, metadata !"fpexcept.strict") #0
769  ret float %val
770}
771
772define i32 @fcmp_olt_f32(float %a, float %b) #0 {
773; CHECK-LABEL: fcmp_olt_f32:
774; CHECK:       // %bb.0:
775; CHECK-NEXT:    fcmp s0, s1
776; CHECK-NEXT:    cset w0, mi
777; CHECK-NEXT:    ret
778  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
779  %conv = zext i1 %cmp to i32
780  ret i32 %conv
781}
782
783define i32 @fcmp_ole_f32(float %a, float %b) #0 {
784; CHECK-LABEL: fcmp_ole_f32:
785; CHECK:       // %bb.0:
786; CHECK-NEXT:    fcmp s0, s1
787; CHECK-NEXT:    cset w0, ls
788; CHECK-NEXT:    ret
789  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
790  %conv = zext i1 %cmp to i32
791  ret i32 %conv
792}
793
794define i32 @fcmp_ogt_f32(float %a, float %b) #0 {
795; CHECK-LABEL: fcmp_ogt_f32:
796; CHECK:       // %bb.0:
797; CHECK-NEXT:    fcmp s0, s1
798; CHECK-NEXT:    cset w0, gt
799; CHECK-NEXT:    ret
800  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
801  %conv = zext i1 %cmp to i32
802  ret i32 %conv
803}
804
805define i32 @fcmp_oge_f32(float %a, float %b) #0 {
806; CHECK-LABEL: fcmp_oge_f32:
807; CHECK:       // %bb.0:
808; CHECK-NEXT:    fcmp s0, s1
809; CHECK-NEXT:    cset w0, ge
810; CHECK-NEXT:    ret
811  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
812  %conv = zext i1 %cmp to i32
813  ret i32 %conv
814}
815
816define i32 @fcmp_oeq_f32(float %a, float %b) #0 {
817; CHECK-LABEL: fcmp_oeq_f32:
818; CHECK:       // %bb.0:
819; CHECK-NEXT:    fcmp s0, s1
820; CHECK-NEXT:    cset w0, eq
821; CHECK-NEXT:    ret
822  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
823  %conv = zext i1 %cmp to i32
824  ret i32 %conv
825}
826
827define i32 @fcmp_one_f32(float %a, float %b) #0 {
828; CHECK-LABEL: fcmp_one_f32:
829; CHECK:       // %bb.0:
830; CHECK-NEXT:    fcmp s0, s1
831; CHECK-NEXT:    cset w8, mi
832; CHECK-NEXT:    csinc w0, w8, wzr, le
833; CHECK-NEXT:    ret
834  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
835  %conv = zext i1 %cmp to i32
836  ret i32 %conv
837}
838
839define i32 @fcmp_ult_f32(float %a, float %b) #0 {
840; CHECK-LABEL: fcmp_ult_f32:
841; CHECK:       // %bb.0:
842; CHECK-NEXT:    fcmp s0, s1
843; CHECK-NEXT:    cset w0, lt
844; CHECK-NEXT:    ret
845  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
846  %conv = zext i1 %cmp to i32
847  ret i32 %conv
848}
849
850define i32 @fcmp_ule_f32(float %a, float %b) #0 {
851; CHECK-LABEL: fcmp_ule_f32:
852; CHECK:       // %bb.0:
853; CHECK-NEXT:    fcmp s0, s1
854; CHECK-NEXT:    cset w0, le
855; CHECK-NEXT:    ret
856  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
857  %conv = zext i1 %cmp to i32
858  ret i32 %conv
859}
860
861define i32 @fcmp_ugt_f32(float %a, float %b) #0 {
862; CHECK-LABEL: fcmp_ugt_f32:
863; CHECK:       // %bb.0:
864; CHECK-NEXT:    fcmp s0, s1
865; CHECK-NEXT:    cset w0, hi
866; CHECK-NEXT:    ret
867  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
868  %conv = zext i1 %cmp to i32
869  ret i32 %conv
870}
871
872define i32 @fcmp_uge_f32(float %a, float %b) #0 {
873; CHECK-LABEL: fcmp_uge_f32:
874; CHECK:       // %bb.0:
875; CHECK-NEXT:    fcmp s0, s1
876; CHECK-NEXT:    cset w0, pl
877; CHECK-NEXT:    ret
878  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
879  %conv = zext i1 %cmp to i32
880  ret i32 %conv
881}
882
883define i32 @fcmp_ueq_f32(float %a, float %b) #0 {
884; CHECK-LABEL: fcmp_ueq_f32:
885; CHECK:       // %bb.0:
886; CHECK-NEXT:    fcmp s0, s1
887; CHECK-NEXT:    cset w8, eq
888; CHECK-NEXT:    csinc w0, w8, wzr, vc
889; CHECK-NEXT:    ret
890  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
891  %conv = zext i1 %cmp to i32
892  ret i32 %conv
893}
894
895define i32 @fcmp_une_f32(float %a, float %b) #0 {
896; CHECK-LABEL: fcmp_une_f32:
897; CHECK:       // %bb.0:
898; CHECK-NEXT:    fcmp s0, s1
899; CHECK-NEXT:    cset w0, ne
900; CHECK-NEXT:    ret
901  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
902  %conv = zext i1 %cmp to i32
903  ret i32 %conv
904}
905
906define i32 @fcmps_olt_f32(float %a, float %b) #0 {
907; CHECK-LABEL: fcmps_olt_f32:
908; CHECK:       // %bb.0:
909; CHECK-NEXT:    fcmpe s0, s1
910; CHECK-NEXT:    cset w0, mi
911; CHECK-NEXT:    ret
912  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
913  %conv = zext i1 %cmp to i32
914  ret i32 %conv
915}
916
917define i32 @fcmps_ole_f32(float %a, float %b) #0 {
918; CHECK-LABEL: fcmps_ole_f32:
919; CHECK:       // %bb.0:
920; CHECK-NEXT:    fcmpe s0, s1
921; CHECK-NEXT:    cset w0, ls
922; CHECK-NEXT:    ret
923  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
924  %conv = zext i1 %cmp to i32
925  ret i32 %conv
926}
927
928define i32 @fcmps_ogt_f32(float %a, float %b) #0 {
929; CHECK-LABEL: fcmps_ogt_f32:
930; CHECK:       // %bb.0:
931; CHECK-NEXT:    fcmpe s0, s1
932; CHECK-NEXT:    cset w0, gt
933; CHECK-NEXT:    ret
934  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
935  %conv = zext i1 %cmp to i32
936  ret i32 %conv
937}
938
939define i32 @fcmps_oge_f32(float %a, float %b) #0 {
940; CHECK-LABEL: fcmps_oge_f32:
941; CHECK:       // %bb.0:
942; CHECK-NEXT:    fcmpe s0, s1
943; CHECK-NEXT:    cset w0, ge
944; CHECK-NEXT:    ret
945  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
946  %conv = zext i1 %cmp to i32
947  ret i32 %conv
948}
949
950define i32 @fcmps_oeq_f32(float %a, float %b) #0 {
951; CHECK-LABEL: fcmps_oeq_f32:
952; CHECK:       // %bb.0:
953; CHECK-NEXT:    fcmpe s0, s1
954; CHECK-NEXT:    cset w0, eq
955; CHECK-NEXT:    ret
956  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
957  %conv = zext i1 %cmp to i32
958  ret i32 %conv
959}
960
961define i32 @fcmps_one_f32(float %a, float %b) #0 {
962; CHECK-LABEL: fcmps_one_f32:
963; CHECK:       // %bb.0:
964; CHECK-NEXT:    fcmpe s0, s1
965; CHECK-NEXT:    cset w8, mi
966; CHECK-NEXT:    csinc w0, w8, wzr, le
967; CHECK-NEXT:    ret
968  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
969  %conv = zext i1 %cmp to i32
970  ret i32 %conv
971}
972
973define i32 @fcmps_ult_f32(float %a, float %b) #0 {
974; CHECK-LABEL: fcmps_ult_f32:
975; CHECK:       // %bb.0:
976; CHECK-NEXT:    fcmpe s0, s1
977; CHECK-NEXT:    cset w0, lt
978; CHECK-NEXT:    ret
979  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
980  %conv = zext i1 %cmp to i32
981  ret i32 %conv
982}
983
984define i32 @fcmps_ule_f32(float %a, float %b) #0 {
985; CHECK-LABEL: fcmps_ule_f32:
986; CHECK:       // %bb.0:
987; CHECK-NEXT:    fcmpe s0, s1
988; CHECK-NEXT:    cset w0, le
989; CHECK-NEXT:    ret
990  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
991  %conv = zext i1 %cmp to i32
992  ret i32 %conv
993}
994
995define i32 @fcmps_ugt_f32(float %a, float %b) #0 {
996; CHECK-LABEL: fcmps_ugt_f32:
997; CHECK:       // %bb.0:
998; CHECK-NEXT:    fcmpe s0, s1
999; CHECK-NEXT:    cset w0, hi
1000; CHECK-NEXT:    ret
1001  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
1002  %conv = zext i1 %cmp to i32
1003  ret i32 %conv
1004}
1005
1006define i32 @fcmps_uge_f32(float %a, float %b) #0 {
1007; CHECK-LABEL: fcmps_uge_f32:
1008; CHECK:       // %bb.0:
1009; CHECK-NEXT:    fcmpe s0, s1
1010; CHECK-NEXT:    cset w0, pl
1011; CHECK-NEXT:    ret
1012  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
1013  %conv = zext i1 %cmp to i32
1014  ret i32 %conv
1015}
1016
1017define i32 @fcmps_ueq_f32(float %a, float %b) #0 {
1018; CHECK-LABEL: fcmps_ueq_f32:
1019; CHECK:       // %bb.0:
1020; CHECK-NEXT:    fcmpe s0, s1
1021; CHECK-NEXT:    cset w8, eq
1022; CHECK-NEXT:    csinc w0, w8, wzr, vc
1023; CHECK-NEXT:    ret
1024  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
1025  %conv = zext i1 %cmp to i32
1026  ret i32 %conv
1027}
1028
1029define i32 @fcmps_une_f32(float %a, float %b) #0 {
1030; CHECK-LABEL: fcmps_une_f32:
1031; CHECK:       // %bb.0:
1032; CHECK-NEXT:    fcmpe s0, s1
1033; CHECK-NEXT:    cset w0, ne
1034; CHECK-NEXT:    ret
1035  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
1036  %conv = zext i1 %cmp to i32
1037  ret i32 %conv
1038}
1039
1040
1041; Double-precision intrinsics
1042
1043define double @add_f64(double %x, double %y) #0 {
1044; CHECK-LABEL: add_f64:
1045; CHECK:       // %bb.0:
1046; CHECK-NEXT:    fadd d0, d0, d1
1047; CHECK-NEXT:    ret
1048  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1049  ret double %val
1050}
1051
1052define double @sub_f64(double %x, double %y) #0 {
1053; CHECK-LABEL: sub_f64:
1054; CHECK:       // %bb.0:
1055; CHECK-NEXT:    fsub d0, d0, d1
1056; CHECK-NEXT:    ret
1057  %val = call double @llvm.experimental.constrained.fsub.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1058  ret double %val
1059}
1060
1061define double @mul_f64(double %x, double %y) #0 {
1062; CHECK-LABEL: mul_f64:
1063; CHECK:       // %bb.0:
1064; CHECK-NEXT:    fmul d0, d0, d1
1065; CHECK-NEXT:    ret
1066  %val = call double @llvm.experimental.constrained.fmul.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1067  ret double %val
1068}
1069
1070define double @div_f64(double %x, double %y) #0 {
1071; CHECK-LABEL: div_f64:
1072; CHECK:       // %bb.0:
1073; CHECK-NEXT:    fdiv d0, d0, d1
1074; CHECK-NEXT:    ret
1075  %val = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1076  ret double %val
1077}
1078
1079define double @frem_f64(double %x, double %y) #0 {
1080; CHECK-LABEL: frem_f64:
1081; CHECK:       // %bb.0:
1082; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1083; CHECK-NEXT:    .cfi_def_cfa_offset 16
1084; CHECK-NEXT:    .cfi_offset w30, -16
1085; CHECK-NEXT:    bl fmod
1086; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1087; CHECK-NEXT:    ret
1088  %val = call double @llvm.experimental.constrained.frem.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1089  ret double %val
1090}
1091
1092define double @fma_f64(double %x, double %y, double %z) #0 {
1093; CHECK-LABEL: fma_f64:
1094; CHECK:       // %bb.0:
1095; CHECK-NEXT:    fmadd d0, d0, d1, d2
1096; CHECK-NEXT:    ret
1097  %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1098  ret double %val
1099}
1100
1101define i32 @fptosi_i32_f64(double %x) #0 {
1102; CHECK-LABEL: fptosi_i32_f64:
1103; CHECK:       // %bb.0:
1104; CHECK-NEXT:    fcvtzs w0, d0
1105; CHECK-NEXT:    ret
1106  %val = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0
1107  ret i32 %val
1108}
1109
1110define i32 @fptoui_i32_f64(double %x) #0 {
1111; CHECK-LABEL: fptoui_i32_f64:
1112; CHECK:       // %bb.0:
1113; CHECK-NEXT:    fcvtzu w0, d0
1114; CHECK-NEXT:    ret
1115  %val = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") #0
1116  ret i32 %val
1117}
1118
1119define i64 @fptosi_i64_f64(double %x) #0 {
1120; CHECK-LABEL: fptosi_i64_f64:
1121; CHECK:       // %bb.0:
1122; CHECK-NEXT:    fcvtzs x0, d0
1123; CHECK-NEXT:    ret
1124  %val = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0
1125  ret i64 %val
1126}
1127
1128define i64 @fptoui_i64_f64(double %x) #0 {
1129; CHECK-LABEL: fptoui_i64_f64:
1130; CHECK:       // %bb.0:
1131; CHECK-NEXT:    fcvtzu x0, d0
1132; CHECK-NEXT:    ret
1133  %val = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0
1134  ret i64 %val
1135}
1136
1137define double @sitofp_f64_i32(i32 %x) #0 {
1138; CHECK-LABEL: sitofp_f64_i32:
1139; CHECK:       // %bb.0:
1140; CHECK-NEXT:    scvtf d0, w0
1141; CHECK-NEXT:    ret
1142  %val = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1143  ret double %val
1144}
1145
1146define double @uitofp_f64_i32(i32 %x) #0 {
1147; CHECK-LABEL: uitofp_f64_i32:
1148; CHECK:       // %bb.0:
1149; CHECK-NEXT:    ucvtf d0, w0
1150; CHECK-NEXT:    ret
1151  %val = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1152  ret double %val
1153}
1154
1155define double @sitofp_f64_i64(i64 %x) #0 {
1156; CHECK-LABEL: sitofp_f64_i64:
1157; CHECK:       // %bb.0:
1158; CHECK-NEXT:    scvtf d0, x0
1159; CHECK-NEXT:    ret
1160  %val = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1161  ret double %val
1162}
1163
1164define double @uitofp_f64_i64(i64 %x) #0 {
1165; CHECK-LABEL: uitofp_f64_i64:
1166; CHECK:       // %bb.0:
1167; CHECK-NEXT:    ucvtf d0, x0
1168; CHECK-NEXT:    ret
1169  %val = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1170  ret double %val
1171}
1172
1173define double @sitofp_f64_i128(i128 %x) #0 {
1174; CHECK-LABEL: sitofp_f64_i128:
1175; CHECK:       // %bb.0:
1176; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1177; CHECK-NEXT:    .cfi_def_cfa_offset 16
1178; CHECK-NEXT:    .cfi_offset w30, -16
1179; CHECK-NEXT:    bl __floattidf
1180; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1181; CHECK-NEXT:    ret
1182  %val = call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1183  ret double %val
1184}
1185
1186define double @uitofp_f64_i128(i128 %x) #0 {
1187; CHECK-LABEL: uitofp_f64_i128:
1188; CHECK:       // %bb.0:
1189; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1190; CHECK-NEXT:    .cfi_def_cfa_offset 16
1191; CHECK-NEXT:    .cfi_offset w30, -16
1192; CHECK-NEXT:    bl __floatuntidf
1193; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1194; CHECK-NEXT:    ret
1195  %val = call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1196  ret double %val
1197}
1198
1199define double @sqrt_f64(double %x) #0 {
1200; CHECK-LABEL: sqrt_f64:
1201; CHECK:       // %bb.0:
1202; CHECK-NEXT:    fsqrt d0, d0
1203; CHECK-NEXT:    ret
1204  %val = call double @llvm.experimental.constrained.sqrt.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1205  ret double %val
1206}
1207
1208define double @powi_f64(double %x, i32 %y) #0 {
1209; CHECK-LABEL: powi_f64:
1210; CHECK:       // %bb.0:
1211; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1212; CHECK-NEXT:    .cfi_def_cfa_offset 16
1213; CHECK-NEXT:    .cfi_offset w30, -16
1214; CHECK-NEXT:    bl __powidf2
1215; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1216; CHECK-NEXT:    ret
1217  %val = call double @llvm.experimental.constrained.powi.f64(double %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1218  ret double %val
1219}
1220
1221define double @sin_f64(double %x) #0 {
1222; CHECK-LABEL: sin_f64:
1223; CHECK:       // %bb.0:
1224; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1225; CHECK-NEXT:    .cfi_def_cfa_offset 16
1226; CHECK-NEXT:    .cfi_offset w30, -16
1227; CHECK-NEXT:    bl sin
1228; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1229; CHECK-NEXT:    ret
1230  %val = call double @llvm.experimental.constrained.sin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1231  ret double %val
1232}
1233
1234define double @cos_f64(double %x) #0 {
1235; CHECK-LABEL: cos_f64:
1236; CHECK:       // %bb.0:
1237; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1238; CHECK-NEXT:    .cfi_def_cfa_offset 16
1239; CHECK-NEXT:    .cfi_offset w30, -16
1240; CHECK-NEXT:    bl cos
1241; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1242; CHECK-NEXT:    ret
1243  %val = call double @llvm.experimental.constrained.cos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1244  ret double %val
1245}
1246
1247define double @tan_f64(double %x) #0 {
1248; CHECK-LABEL: tan_f64:
1249; CHECK:       // %bb.0:
1250; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1251; CHECK-NEXT:    .cfi_def_cfa_offset 16
1252; CHECK-NEXT:    .cfi_offset w30, -16
1253; CHECK-NEXT:    bl tan
1254; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1255; CHECK-NEXT:    ret
1256  %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1257  ret double %val
1258}
1259
1260define double @asin_f64(double %x) #0 {
1261; CHECK-LABEL: asin_f64:
1262; CHECK:       // %bb.0:
1263; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1264; CHECK-NEXT:    .cfi_def_cfa_offset 16
1265; CHECK-NEXT:    .cfi_offset w30, -16
1266; CHECK-NEXT:    bl asin
1267; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1268; CHECK-NEXT:    ret
1269  %val = call double @llvm.experimental.constrained.asin.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1270  ret double %val
1271}
1272
1273define double @acos_f64(double %x) #0 {
1274; CHECK-LABEL: acos_f64:
1275; CHECK:       // %bb.0:
1276; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1277; CHECK-NEXT:    .cfi_def_cfa_offset 16
1278; CHECK-NEXT:    .cfi_offset w30, -16
1279; CHECK-NEXT:    bl acos
1280; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1281; CHECK-NEXT:    ret
1282  %val = call double @llvm.experimental.constrained.acos.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1283  ret double %val
1284}
1285
1286define double @atan_f64(double %x) #0 {
1287; CHECK-LABEL: atan_f64:
1288; CHECK:       // %bb.0:
1289; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1290; CHECK-NEXT:    .cfi_def_cfa_offset 16
1291; CHECK-NEXT:    .cfi_offset w30, -16
1292; CHECK-NEXT:    bl atan
1293; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1294; CHECK-NEXT:    ret
1295  %val = call double @llvm.experimental.constrained.atan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1296  ret double %val
1297}
1298
1299define double @atan2_f64(double %x, double %y) #0 {
1300; CHECK-LABEL: atan2_f64:
1301; CHECK:       // %bb.0:
1302; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1303; CHECK-NEXT:    .cfi_def_cfa_offset 16
1304; CHECK-NEXT:    .cfi_offset w30, -16
1305; CHECK-NEXT:    bl atan2
1306; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1307; CHECK-NEXT:    ret
1308  %val = call double @llvm.experimental.constrained.atan2.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1309  ret double %val
1310}
1311
1312define double @sinh_f64(double %x) #0 {
1313; CHECK-LABEL: sinh_f64:
1314; CHECK:       // %bb.0:
1315; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1316; CHECK-NEXT:    .cfi_def_cfa_offset 16
1317; CHECK-NEXT:    .cfi_offset w30, -16
1318; CHECK-NEXT:    bl sinh
1319; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1320; CHECK-NEXT:    ret
1321  %val = call double @llvm.experimental.constrained.sinh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1322  ret double %val
1323}
1324
1325define double @cosh_f64(double %x) #0 {
1326; CHECK-LABEL: cosh_f64:
1327; CHECK:       // %bb.0:
1328; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
1329; CHECK-NEXT:    .cfi_def_cfa_offset 16
1330; CHECK-NEXT:    .cfi_offset w30, -16
1331; CHECK-NEXT:    bl cosh
1332; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
1333; CHECK-NEXT:    ret
1334  %val = call double @llvm.experimental.constrained.cosh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
1335  ret double %val
1336}
1337
; f64 math-library intrinsics: with !"fpexcept.strict" these constrained
; intrinsics may not be folded or expanded inline; each must lower to a call
; to the corresponding f64 libm routine (tanh, pow, log, log10, log2, exp,
; exp2), with only the x30 spill/reload around the call.
define double @tanh_f64(double %x) #0 {
; CHECK-LABEL: tanh_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanh
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.tanh.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @pow_f64(double %x, double %y) #0 {
; CHECK-LABEL: pow_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl pow
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.pow.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @log_f64(double %x) #0 {
; CHECK-LABEL: log_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.log.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @log10_f64(double %x) #0 {
; CHECK-LABEL: log10_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log10
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.log10.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @log2_f64(double %x) #0 {
; CHECK-LABEL: log2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.log2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @exp_f64(double %x) #0 {
; CHECK-LABEL: exp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl exp
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.exp.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @exp2_f64(double %x) #0 {
; CHECK-LABEL: exp2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl exp2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.exp2.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}
1428
; Strict rint/nearbyint and the integer-returning lrint/llrint select native
; instructions: frintx (raises Inexact) for rint, frinti (no Inexact) for
; nearbyint, and frintx followed by fcvtzs for lrint/llrint so the inexact
; rounding is signalled before the exact truncating convert.
define double @rint_f64(double %x) #0 {
; CHECK-LABEL: rint_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.rint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define double @nearbyint_f64(double %x) #0 {
; CHECK-LABEL: nearbyint_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.nearbyint.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %val
}

define i32 @lrint_f64(double %x) #0 {
; CHECK-LABEL: lrint_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx d0, d0
; CHECK-NEXT:    fcvtzs w0, d0
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret i32 %val
}

define i64 @llrint_f64(double %x) #0 {
; CHECK-LABEL: llrint_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx d0, d0
; CHECK-NEXT:    fcvtzs x0, d0
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret i64 %val
}
1466
; Strict min/max: the NaN-filtering maxnum/minnum map to fmaxnm/fminnm, while
; the NaN-propagating maximum/minimum map to fmax/fmin.
define double @maxnum_f64(double %x, double %y) #0 {
; CHECK-LABEL: maxnum_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.maxnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @minnum_f64(double %x, double %y) #0 {
; CHECK-LABEL: minnum_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.minnum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @maximum_f64(double %x, double %y) #0 {
; CHECK-LABEL: maximum_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax d0, d0, d1
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.maximum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @minimum_f64(double %x, double %y) #0 {
; CHECK-LABEL: minimum_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin d0, d0, d1
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.minimum.f64(double %x, double %y, metadata !"fpexcept.strict") #0
  ret double %val
}
1502
; Strict rounding-to-integral: each directed-rounding intrinsic selects the
; matching frint variant (ceil->frintp, floor->frintm, round->frinta,
; roundeven->frintn, trunc->frintz). lround/llround instead use fcvtas, which
; converts with round-to-nearest-ties-away in a single instruction.
define double @ceil_f64(double %x) #0 {
; CHECK-LABEL: ceil_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.ceil.f64(double %x, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @floor_f64(double %x) #0 {
; CHECK-LABEL: floor_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.floor.f64(double %x, metadata !"fpexcept.strict") #0
  ret double %val
}

define i32 @lround_f64(double %x) #0 {
; CHECK-LABEL: lround_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtas w0, d0
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict") #0
  ret i32 %val
}

define i64 @llround_f64(double %x) #0 {
; CHECK-LABEL: llround_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtas x0, d0
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict") #0
  ret i64 %val
}

define double @round_f64(double %x) #0 {
; CHECK-LABEL: round_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.round.f64(double %x, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @roundeven_f64(double %x) #0 {
; CHECK-LABEL: roundeven_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.roundeven.f64(double %x, metadata !"fpexcept.strict") #0
  ret double %val
}

define double @trunc_f64(double %x) #0 {
; CHECK-LABEL: trunc_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz d0, d0
; CHECK-NEXT:    ret
  %val = call double @llvm.experimental.constrained.trunc.f64(double %x, metadata !"fpexcept.strict") #0
  ret double %val
}
1565
; Quiet strict compares (constrained.fcmp) lower to the non-signalling fcmp.
; Ordered predicates map to condition codes that are false on unordered
; (olt->mi, ole->ls, ogt->gt, oge->ge, oeq->eq); unordered-or predicates map
; to codes that are also true on unordered (ult->lt, ule->le, ugt->hi,
; uge->pl, une->ne). one and ueq need the union of two conditions, formed
; with a cset followed by csinc.
define i32 @fcmp_olt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_olt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, mi
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ole_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ole_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, ls
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ogt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ogt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, gt
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_oge_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_oge_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, ge
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_oeq_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_oeq_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_one_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_one_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w8, mi
; CHECK-NEXT:    csinc w0, w8, wzr, le
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ult_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ult_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, lt
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ule_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ule_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, le
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ugt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ugt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, hi
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_uge_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_uge_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, pl
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_ueq_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_ueq_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w8, eq
; CHECK-NEXT:    csinc w0, w8, wzr, vc
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmp_une_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmp_une_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}
1699
; Signalling strict compares (constrained.fcmps) must use fcmpe instead of
; fcmp; the predicate-to-condition-code mapping is the same as for the quiet
; fcmp tests above, including the two-condition cset+csinc form for one/ueq.
define i32 @fcmps_olt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_olt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, mi
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ole_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ole_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, ls
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ogt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ogt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, gt
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_oge_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_oge_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, ge
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_oeq_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_oeq_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_one_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_one_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w8, mi
; CHECK-NEXT:    csinc w0, w8, wzr, le
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ult_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ult_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, lt
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ule_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ule_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, le
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ugt_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ugt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, hi
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_uge_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_uge_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, pl
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_ueq_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_ueq_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w8, eq
; CHECK-NEXT:    csinc w0, w8, wzr, vc
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @fcmps_une_f64(double %a, double %b) #0 {
; CHECK-LABEL: fcmps_une_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, ne
; CHECK-NEXT:    ret
  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}
1833
1834
1835; Long-double-precision intrinsics
1836
; fp128 arithmetic has no native AArch64 instructions: strict fadd/fsub/fmul/
; fdiv lower to the compiler-rt soft-float helpers (__addtf3 etc.), while
; frem and fma lower to the libm long-double routines fmodl and fmal.
define fp128 @add_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: add_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __addtf3
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.fadd.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @sub_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: sub_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __subtf3
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.fsub.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @mul_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: mul_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __multf3
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.fmul.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @div_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: div_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __divtf3
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.fdiv.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @frem_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: frem_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl fmodl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.frem.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @fma_f128(fp128 %x, fp128 %y, fp128 %z) #0 {
; CHECK-LABEL: fma_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl fmal
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.fma.f128(fp128 %x, fp128 %y, fp128 %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}
1914
; Strict fp128 -> integer conversions lower to the soft-float helpers:
; __fixtfsi/__fixunstfsi for i32 and __fixtfdi/__fixunstfdi for i64.
define i32 @fptosi_i32_f128(fp128 %x) #0 {
; CHECK-LABEL: fptosi_i32_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __fixtfsi
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i32 %val
}

define i32 @fptoui_i32_f128(fp128 %x) #0 {
; CHECK-LABEL: fptoui_i32_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __fixunstfsi
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i32 %val
}

define i64 @fptosi_i64_f128(fp128 %x) #0 {
; CHECK-LABEL: fptosi_i64_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __fixtfdi
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i64 %val
}

define i64 @fptoui_i64_f128(fp128 %x) #0 {
; CHECK-LABEL: fptoui_i64_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __fixunstfdi
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i64 %val
}
1966
; Strict integer -> fp128 conversions lower to the soft-float helpers:
; __float{si,un si}tf for i32, __float{di,undi}tf for i64, and
; __float{ti,unti}tf for i128.
define fp128 @sitofp_f128_i32(i32 %x) #0 {
; CHECK-LABEL: sitofp_f128_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatsitf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @uitofp_f128_i32(i32 %x) #0 {
; CHECK-LABEL: uitofp_f128_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatunsitf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @sitofp_f128_i64(i64 %x) #0 {
; CHECK-LABEL: sitofp_f128_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatditf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @uitofp_f128_i64(i64 %x) #0 {
; CHECK-LABEL: uitofp_f128_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatunditf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @sitofp_f128_i128(i128 %x) #0 {
; CHECK-LABEL: sitofp_f128_i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floattitf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @uitofp_f128_i128(i128 %x) #0 {
; CHECK-LABEL: uitofp_f128_i128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __floatuntitf
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}
2044
; Strict fp128 sqrt lowers to libm sqrtl; powi (integer exponent) lowers to
; the compiler-rt helper __powitf2.
define fp128 @sqrt_f128(fp128 %x) #0 {
; CHECK-LABEL: sqrt_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sqrtl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @powi_f128(fp128 %x, i32 %y) #0 {
; CHECK-LABEL: powi_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl __powitf2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.powi.f128(fp128 %x, i32 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}
2070
; fp128 math-library intrinsics: each strict constrained intrinsic lowers to
; the long-double ("l"-suffixed) libm routine (sinl, cosl, tanl, asinl, acosl,
; atanl, atan2l, sinhl, coshl, tanhl, powl, logl).
define fp128 @sin_f128(fp128 %x) #0 {
; CHECK-LABEL: sin_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sinl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @cos_f128(fp128 %x) #0 {
; CHECK-LABEL: cos_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl cosl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.cos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @tan_f128(fp128 %x) #0 {
; CHECK-LABEL: tan_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @asin_f128(fp128 %x) #0 {
; CHECK-LABEL: asin_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl asinl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.asin.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @acos_f128(fp128 %x) #0 {
; CHECK-LABEL: acos_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl acosl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.acos.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @atan_f128(fp128 %x) #0 {
; CHECK-LABEL: atan_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atanl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.atan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @atan2_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: atan2_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atan2l
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.atan2.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @sinh_f128(fp128 %x) #0 {
; CHECK-LABEL: sinh_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sinhl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.sinh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @cosh_f128(fp128 %x) #0 {
; CHECK-LABEL: cosh_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl coshl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.cosh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @tanh_f128(fp128 %x) #0 {
; CHECK-LABEL: tanh_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanhl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.tanh.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @pow_f128(fp128 %x, fp128 %y) #0 {
; CHECK-LABEL: pow_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl powl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.pow.f128(fp128 %x, fp128 %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}

define fp128 @log_f128(fp128 %x) #0 {
; CHECK-LABEL: log_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl logl
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call fp128 @llvm.experimental.constrained.log.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret fp128 %val
}
2226
2227define fp128 @log10_f128(fp128 %x) #0 {
2228; CHECK-LABEL: log10_f128:
2229; CHECK:       // %bb.0:
2230; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2231; CHECK-NEXT:    .cfi_def_cfa_offset 16
2232; CHECK-NEXT:    .cfi_offset w30, -16
2233; CHECK-NEXT:    bl log10l
2234; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2235; CHECK-NEXT:    ret
2236  %val = call fp128 @llvm.experimental.constrained.log10.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2237  ret fp128 %val
2238}
2239
2240define fp128 @log2_f128(fp128 %x) #0 {
2241; CHECK-LABEL: log2_f128:
2242; CHECK:       // %bb.0:
2243; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2244; CHECK-NEXT:    .cfi_def_cfa_offset 16
2245; CHECK-NEXT:    .cfi_offset w30, -16
2246; CHECK-NEXT:    bl log2l
2247; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2248; CHECK-NEXT:    ret
2249  %val = call fp128 @llvm.experimental.constrained.log2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2250  ret fp128 %val
2251}
2252
2253define fp128 @exp_f128(fp128 %x) #0 {
2254; CHECK-LABEL: exp_f128:
2255; CHECK:       // %bb.0:
2256; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2257; CHECK-NEXT:    .cfi_def_cfa_offset 16
2258; CHECK-NEXT:    .cfi_offset w30, -16
2259; CHECK-NEXT:    bl expl
2260; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2261; CHECK-NEXT:    ret
2262  %val = call fp128 @llvm.experimental.constrained.exp.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2263  ret fp128 %val
2264}
2265
2266define fp128 @exp2_f128(fp128 %x) #0 {
2267; CHECK-LABEL: exp2_f128:
2268; CHECK:       // %bb.0:
2269; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2270; CHECK-NEXT:    .cfi_def_cfa_offset 16
2271; CHECK-NEXT:    .cfi_offset w30, -16
2272; CHECK-NEXT:    bl exp2l
2273; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2274; CHECK-NEXT:    ret
2275  %val = call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2276  ret fp128 %val
2277}
2278
2279define fp128 @rint_f128(fp128 %x) #0 {
2280; CHECK-LABEL: rint_f128:
2281; CHECK:       // %bb.0:
2282; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2283; CHECK-NEXT:    .cfi_def_cfa_offset 16
2284; CHECK-NEXT:    .cfi_offset w30, -16
2285; CHECK-NEXT:    bl rintl
2286; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2287; CHECK-NEXT:    ret
2288  %val = call fp128 @llvm.experimental.constrained.rint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2289  ret fp128 %val
2290}
2291
2292define fp128 @nearbyint_f128(fp128 %x) #0 {
2293; CHECK-LABEL: nearbyint_f128:
2294; CHECK:       // %bb.0:
2295; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2296; CHECK-NEXT:    .cfi_def_cfa_offset 16
2297; CHECK-NEXT:    .cfi_offset w30, -16
2298; CHECK-NEXT:    bl nearbyintl
2299; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2300; CHECK-NEXT:    ret
2301  %val = call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2302  ret fp128 %val
2303}
2304
2305define i32 @lrint_f128(fp128 %x) #0 {
2306; CHECK-LABEL: lrint_f128:
2307; CHECK:       // %bb.0:
2308; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2309; CHECK-NEXT:    .cfi_def_cfa_offset 16
2310; CHECK-NEXT:    .cfi_offset w30, -16
2311; CHECK-NEXT:    bl lrintl
2312; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2313; CHECK-NEXT:    ret
2314  %val = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2315  ret i32 %val
2316}
2317
2318define i64 @llrint_f128(fp128 %x) #0 {
2319; CHECK-LABEL: llrint_f128:
2320; CHECK:       // %bb.0:
2321; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2322; CHECK-NEXT:    .cfi_def_cfa_offset 16
2323; CHECK-NEXT:    .cfi_offset w30, -16
2324; CHECK-NEXT:    bl llrintl
2325; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2326; CHECK-NEXT:    ret
2327  %val = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2328  ret i64 %val
2329}
2330
2331define fp128 @maxnum_f128(fp128 %x, fp128 %y) #0 {
2332; CHECK-LABEL: maxnum_f128:
2333; CHECK:       // %bb.0:
2334; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2335; CHECK-NEXT:    .cfi_def_cfa_offset 16
2336; CHECK-NEXT:    .cfi_offset w30, -16
2337; CHECK-NEXT:    bl fmaxl
2338; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2339; CHECK-NEXT:    ret
2340  %val = call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
2341  ret fp128 %val
2342}
2343
2344define fp128 @minnum_f128(fp128 %x, fp128 %y) #0 {
2345; CHECK-LABEL: minnum_f128:
2346; CHECK:       // %bb.0:
2347; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2348; CHECK-NEXT:    .cfi_def_cfa_offset 16
2349; CHECK-NEXT:    .cfi_offset w30, -16
2350; CHECK-NEXT:    bl fminl
2351; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2352; CHECK-NEXT:    ret
2353  %val = call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
2354  ret fp128 %val
2355}
2356
2357define fp128 @ceil_f128(fp128 %x) #0 {
2358; CHECK-LABEL: ceil_f128:
2359; CHECK:       // %bb.0:
2360; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2361; CHECK-NEXT:    .cfi_def_cfa_offset 16
2362; CHECK-NEXT:    .cfi_offset w30, -16
2363; CHECK-NEXT:    bl ceill
2364; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2365; CHECK-NEXT:    ret
2366  %val = call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %x, metadata !"fpexcept.strict") #0
2367  ret fp128 %val
2368}
2369
2370define fp128 @floor_f128(fp128 %x) #0 {
2371; CHECK-LABEL: floor_f128:
2372; CHECK:       // %bb.0:
2373; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2374; CHECK-NEXT:    .cfi_def_cfa_offset 16
2375; CHECK-NEXT:    .cfi_offset w30, -16
2376; CHECK-NEXT:    bl floorl
2377; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2378; CHECK-NEXT:    ret
2379  %val = call fp128 @llvm.experimental.constrained.floor.f128(fp128 %x, metadata !"fpexcept.strict") #0
2380  ret fp128 %val
2381}
2382
2383define i32 @lround_f128(fp128 %x) #0 {
2384; CHECK-LABEL: lround_f128:
2385; CHECK:       // %bb.0:
2386; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2387; CHECK-NEXT:    .cfi_def_cfa_offset 16
2388; CHECK-NEXT:    .cfi_offset w30, -16
2389; CHECK-NEXT:    bl lroundl
2390; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2391; CHECK-NEXT:    ret
2392  %val = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
2393  ret i32 %val
2394}
2395
2396define i64 @llround_f128(fp128 %x) #0 {
2397; CHECK-LABEL: llround_f128:
2398; CHECK:       // %bb.0:
2399; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2400; CHECK-NEXT:    .cfi_def_cfa_offset 16
2401; CHECK-NEXT:    .cfi_offset w30, -16
2402; CHECK-NEXT:    bl llroundl
2403; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2404; CHECK-NEXT:    ret
2405  %val = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
2406  ret i64 %val
2407}
2408
2409define fp128 @round_f128(fp128 %x) #0 {
2410; CHECK-LABEL: round_f128:
2411; CHECK:       // %bb.0:
2412; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2413; CHECK-NEXT:    .cfi_def_cfa_offset 16
2414; CHECK-NEXT:    .cfi_offset w30, -16
2415; CHECK-NEXT:    bl roundl
2416; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2417; CHECK-NEXT:    ret
2418  %val = call fp128 @llvm.experimental.constrained.round.f128(fp128 %x, metadata !"fpexcept.strict") #0
2419  ret fp128 %val
2420}
2421
2422define fp128 @trunc_f128(fp128 %x) #0 {
2423; CHECK-LABEL: trunc_f128:
2424; CHECK:       // %bb.0:
2425; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2426; CHECK-NEXT:    .cfi_def_cfa_offset 16
2427; CHECK-NEXT:    .cfi_offset w30, -16
2428; CHECK-NEXT:    bl truncl
2429; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2430; CHECK-NEXT:    ret
2431  %val = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
2432  ret fp128 %val
2433}
2434
2435define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 {
2436; CHECK-LABEL: fcmp_olt_f128:
2437; CHECK:       // %bb.0:
2438; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2439; CHECK-NEXT:    .cfi_def_cfa_offset 16
2440; CHECK-NEXT:    .cfi_offset w30, -16
2441; CHECK-NEXT:    bl __lttf2
2442; CHECK-NEXT:    cmp w0, #0
2443; CHECK-NEXT:    cset w0, lt
2444; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2445; CHECK-NEXT:    ret
2446  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
2447  %conv = zext i1 %cmp to i32
2448  ret i32 %conv
2449}
2450
2451define i32 @fcmp_ole_f128(fp128 %a, fp128 %b) #0 {
2452; CHECK-LABEL: fcmp_ole_f128:
2453; CHECK:       // %bb.0:
2454; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2455; CHECK-NEXT:    .cfi_def_cfa_offset 16
2456; CHECK-NEXT:    .cfi_offset w30, -16
2457; CHECK-NEXT:    bl __letf2
2458; CHECK-NEXT:    cmp w0, #0
2459; CHECK-NEXT:    cset w0, le
2460; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2461; CHECK-NEXT:    ret
2462  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
2463  %conv = zext i1 %cmp to i32
2464  ret i32 %conv
2465}
2466
2467define i32 @fcmp_ogt_f128(fp128 %a, fp128 %b) #0 {
2468; CHECK-LABEL: fcmp_ogt_f128:
2469; CHECK:       // %bb.0:
2470; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2471; CHECK-NEXT:    .cfi_def_cfa_offset 16
2472; CHECK-NEXT:    .cfi_offset w30, -16
2473; CHECK-NEXT:    bl __gttf2
2474; CHECK-NEXT:    cmp w0, #0
2475; CHECK-NEXT:    cset w0, gt
2476; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2477; CHECK-NEXT:    ret
2478  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
2479  %conv = zext i1 %cmp to i32
2480  ret i32 %conv
2481}
2482
2483define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 {
2484; CHECK-LABEL: fcmp_oge_f128:
2485; CHECK:       // %bb.0:
2486; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2487; CHECK-NEXT:    .cfi_def_cfa_offset 16
2488; CHECK-NEXT:    .cfi_offset w30, -16
2489; CHECK-NEXT:    bl __getf2
2490; CHECK-NEXT:    cmp w0, #0
2491; CHECK-NEXT:    cset w0, ge
2492; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2493; CHECK-NEXT:    ret
2494  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
2495  %conv = zext i1 %cmp to i32
2496  ret i32 %conv
2497}
2498
2499define i32 @fcmp_oeq_f128(fp128 %a, fp128 %b) #0 {
2500; CHECK-LABEL: fcmp_oeq_f128:
2501; CHECK:       // %bb.0:
2502; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2503; CHECK-NEXT:    .cfi_def_cfa_offset 16
2504; CHECK-NEXT:    .cfi_offset w30, -16
2505; CHECK-NEXT:    bl __eqtf2
2506; CHECK-NEXT:    cmp w0, #0
2507; CHECK-NEXT:    cset w0, eq
2508; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2509; CHECK-NEXT:    ret
2510  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
2511  %conv = zext i1 %cmp to i32
2512  ret i32 %conv
2513}
2514
2515define i32 @fcmp_one_f128(fp128 %a, fp128 %b) #0 {
2516; CHECK-LABEL: fcmp_one_f128:
2517; CHECK:       // %bb.0:
2518; CHECK-NEXT:    sub sp, sp, #48
2519; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
2520; CHECK-NEXT:    .cfi_def_cfa_offset 48
2521; CHECK-NEXT:    .cfi_offset w19, -8
2522; CHECK-NEXT:    .cfi_offset w30, -16
2523; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
2524; CHECK-NEXT:    bl __eqtf2
2525; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
2526; CHECK-NEXT:    mov w19, w0
2527; CHECK-NEXT:    bl __unordtf2
2528; CHECK-NEXT:    cmp w0, #0
2529; CHECK-NEXT:    ccmp w19, #0, #4, eq
2530; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
2531; CHECK-NEXT:    cset w0, ne
2532; CHECK-NEXT:    add sp, sp, #48
2533; CHECK-NEXT:    ret
2534  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
2535  %conv = zext i1 %cmp to i32
2536  ret i32 %conv
2537}
2538
2539define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 {
2540; CHECK-LABEL: fcmp_ult_f128:
2541; CHECK:       // %bb.0:
2542; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2543; CHECK-NEXT:    .cfi_def_cfa_offset 16
2544; CHECK-NEXT:    .cfi_offset w30, -16
2545; CHECK-NEXT:    bl __getf2
2546; CHECK-NEXT:    cmp w0, #0
2547; CHECK-NEXT:    cset w0, lt
2548; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2549; CHECK-NEXT:    ret
2550  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
2551  %conv = zext i1 %cmp to i32
2552  ret i32 %conv
2553}
2554
2555define i32 @fcmp_ule_f128(fp128 %a, fp128 %b) #0 {
2556; CHECK-LABEL: fcmp_ule_f128:
2557; CHECK:       // %bb.0:
2558; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2559; CHECK-NEXT:    .cfi_def_cfa_offset 16
2560; CHECK-NEXT:    .cfi_offset w30, -16
2561; CHECK-NEXT:    bl __gttf2
2562; CHECK-NEXT:    cmp w0, #0
2563; CHECK-NEXT:    cset w0, le
2564; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2565; CHECK-NEXT:    ret
2566  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
2567  %conv = zext i1 %cmp to i32
2568  ret i32 %conv
2569}
2570
2571define i32 @fcmp_ugt_f128(fp128 %a, fp128 %b) #0 {
2572; CHECK-LABEL: fcmp_ugt_f128:
2573; CHECK:       // %bb.0:
2574; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2575; CHECK-NEXT:    .cfi_def_cfa_offset 16
2576; CHECK-NEXT:    .cfi_offset w30, -16
2577; CHECK-NEXT:    bl __letf2
2578; CHECK-NEXT:    cmp w0, #0
2579; CHECK-NEXT:    cset w0, gt
2580; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2581; CHECK-NEXT:    ret
2582  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
2583  %conv = zext i1 %cmp to i32
2584  ret i32 %conv
2585}
2586
2587define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 {
2588; CHECK-LABEL: fcmp_uge_f128:
2589; CHECK:       // %bb.0:
2590; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2591; CHECK-NEXT:    .cfi_def_cfa_offset 16
2592; CHECK-NEXT:    .cfi_offset w30, -16
2593; CHECK-NEXT:    bl __lttf2
2594; CHECK-NEXT:    cmp w0, #0
2595; CHECK-NEXT:    cset w0, ge
2596; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2597; CHECK-NEXT:    ret
2598  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
2599  %conv = zext i1 %cmp to i32
2600  ret i32 %conv
2601}
2602
2603define i32 @fcmp_ueq_f128(fp128 %a, fp128 %b) #0 {
2604; CHECK-LABEL: fcmp_ueq_f128:
2605; CHECK:       // %bb.0:
2606; CHECK-NEXT:    sub sp, sp, #48
2607; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
2608; CHECK-NEXT:    .cfi_def_cfa_offset 48
2609; CHECK-NEXT:    .cfi_offset w19, -8
2610; CHECK-NEXT:    .cfi_offset w30, -16
2611; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
2612; CHECK-NEXT:    bl __eqtf2
2613; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
2614; CHECK-NEXT:    mov w19, w0
2615; CHECK-NEXT:    bl __unordtf2
2616; CHECK-NEXT:    cmp w0, #0
2617; CHECK-NEXT:    ccmp w19, #0, #4, eq
2618; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
2619; CHECK-NEXT:    cset w0, eq
2620; CHECK-NEXT:    add sp, sp, #48
2621; CHECK-NEXT:    ret
2622  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
2623  %conv = zext i1 %cmp to i32
2624  ret i32 %conv
2625}
2626
2627define i32 @fcmp_une_f128(fp128 %a, fp128 %b) #0 {
2628; CHECK-LABEL: fcmp_une_f128:
2629; CHECK:       // %bb.0:
2630; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2631; CHECK-NEXT:    .cfi_def_cfa_offset 16
2632; CHECK-NEXT:    .cfi_offset w30, -16
2633; CHECK-NEXT:    bl __netf2
2634; CHECK-NEXT:    cmp w0, #0
2635; CHECK-NEXT:    cset w0, ne
2636; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2637; CHECK-NEXT:    ret
2638  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
2639  %conv = zext i1 %cmp to i32
2640  ret i32 %conv
2641}
2642
2643define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 {
2644; CHECK-LABEL: fcmps_olt_f128:
2645; CHECK:       // %bb.0:
2646; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2647; CHECK-NEXT:    .cfi_def_cfa_offset 16
2648; CHECK-NEXT:    .cfi_offset w30, -16
2649; CHECK-NEXT:    bl __lttf2
2650; CHECK-NEXT:    cmp w0, #0
2651; CHECK-NEXT:    cset w0, lt
2652; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2653; CHECK-NEXT:    ret
2654  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
2655  %conv = zext i1 %cmp to i32
2656  ret i32 %conv
2657}
2658
2659define i32 @fcmps_ole_f128(fp128 %a, fp128 %b) #0 {
2660; CHECK-LABEL: fcmps_ole_f128:
2661; CHECK:       // %bb.0:
2662; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2663; CHECK-NEXT:    .cfi_def_cfa_offset 16
2664; CHECK-NEXT:    .cfi_offset w30, -16
2665; CHECK-NEXT:    bl __letf2
2666; CHECK-NEXT:    cmp w0, #0
2667; CHECK-NEXT:    cset w0, le
2668; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2669; CHECK-NEXT:    ret
2670  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
2671  %conv = zext i1 %cmp to i32
2672  ret i32 %conv
2673}
2674
2675define i32 @fcmps_ogt_f128(fp128 %a, fp128 %b) #0 {
2676; CHECK-LABEL: fcmps_ogt_f128:
2677; CHECK:       // %bb.0:
2678; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2679; CHECK-NEXT:    .cfi_def_cfa_offset 16
2680; CHECK-NEXT:    .cfi_offset w30, -16
2681; CHECK-NEXT:    bl __gttf2
2682; CHECK-NEXT:    cmp w0, #0
2683; CHECK-NEXT:    cset w0, gt
2684; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2685; CHECK-NEXT:    ret
2686  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
2687  %conv = zext i1 %cmp to i32
2688  ret i32 %conv
2689}
2690
2691define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 {
2692; CHECK-LABEL: fcmps_oge_f128:
2693; CHECK:       // %bb.0:
2694; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2695; CHECK-NEXT:    .cfi_def_cfa_offset 16
2696; CHECK-NEXT:    .cfi_offset w30, -16
2697; CHECK-NEXT:    bl __getf2
2698; CHECK-NEXT:    cmp w0, #0
2699; CHECK-NEXT:    cset w0, ge
2700; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2701; CHECK-NEXT:    ret
2702  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
2703  %conv = zext i1 %cmp to i32
2704  ret i32 %conv
2705}
2706
2707define i32 @fcmps_oeq_f128(fp128 %a, fp128 %b) #0 {
2708; CHECK-LABEL: fcmps_oeq_f128:
2709; CHECK:       // %bb.0:
2710; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2711; CHECK-NEXT:    .cfi_def_cfa_offset 16
2712; CHECK-NEXT:    .cfi_offset w30, -16
2713; CHECK-NEXT:    bl __eqtf2
2714; CHECK-NEXT:    cmp w0, #0
2715; CHECK-NEXT:    cset w0, eq
2716; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2717; CHECK-NEXT:    ret
2718  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
2719  %conv = zext i1 %cmp to i32
2720  ret i32 %conv
2721}
2722
2723define i32 @fcmps_one_f128(fp128 %a, fp128 %b) #0 {
2724; CHECK-LABEL: fcmps_one_f128:
2725; CHECK:       // %bb.0:
2726; CHECK-NEXT:    sub sp, sp, #48
2727; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
2728; CHECK-NEXT:    .cfi_def_cfa_offset 48
2729; CHECK-NEXT:    .cfi_offset w19, -8
2730; CHECK-NEXT:    .cfi_offset w30, -16
2731; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
2732; CHECK-NEXT:    bl __eqtf2
2733; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
2734; CHECK-NEXT:    mov w19, w0
2735; CHECK-NEXT:    bl __unordtf2
2736; CHECK-NEXT:    cmp w0, #0
2737; CHECK-NEXT:    ccmp w19, #0, #4, eq
2738; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
2739; CHECK-NEXT:    cset w0, ne
2740; CHECK-NEXT:    add sp, sp, #48
2741; CHECK-NEXT:    ret
2742  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
2743  %conv = zext i1 %cmp to i32
2744  ret i32 %conv
2745}
2746
2747define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 {
2748; CHECK-LABEL: fcmps_ult_f128:
2749; CHECK:       // %bb.0:
2750; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2751; CHECK-NEXT:    .cfi_def_cfa_offset 16
2752; CHECK-NEXT:    .cfi_offset w30, -16
2753; CHECK-NEXT:    bl __getf2
2754; CHECK-NEXT:    cmp w0, #0
2755; CHECK-NEXT:    cset w0, lt
2756; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2757; CHECK-NEXT:    ret
2758  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
2759  %conv = zext i1 %cmp to i32
2760  ret i32 %conv
2761}
2762
2763define i32 @fcmps_ule_f128(fp128 %a, fp128 %b) #0 {
2764; CHECK-LABEL: fcmps_ule_f128:
2765; CHECK:       // %bb.0:
2766; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2767; CHECK-NEXT:    .cfi_def_cfa_offset 16
2768; CHECK-NEXT:    .cfi_offset w30, -16
2769; CHECK-NEXT:    bl __gttf2
2770; CHECK-NEXT:    cmp w0, #0
2771; CHECK-NEXT:    cset w0, le
2772; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2773; CHECK-NEXT:    ret
2774  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
2775  %conv = zext i1 %cmp to i32
2776  ret i32 %conv
2777}
2778
2779define i32 @fcmps_ugt_f128(fp128 %a, fp128 %b) #0 {
2780; CHECK-LABEL: fcmps_ugt_f128:
2781; CHECK:       // %bb.0:
2782; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2783; CHECK-NEXT:    .cfi_def_cfa_offset 16
2784; CHECK-NEXT:    .cfi_offset w30, -16
2785; CHECK-NEXT:    bl __letf2
2786; CHECK-NEXT:    cmp w0, #0
2787; CHECK-NEXT:    cset w0, gt
2788; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2789; CHECK-NEXT:    ret
2790  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
2791  %conv = zext i1 %cmp to i32
2792  ret i32 %conv
2793}
2794
2795define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 {
2796; CHECK-LABEL: fcmps_uge_f128:
2797; CHECK:       // %bb.0:
2798; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2799; CHECK-NEXT:    .cfi_def_cfa_offset 16
2800; CHECK-NEXT:    .cfi_offset w30, -16
2801; CHECK-NEXT:    bl __lttf2
2802; CHECK-NEXT:    cmp w0, #0
2803; CHECK-NEXT:    cset w0, ge
2804; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2805; CHECK-NEXT:    ret
2806  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
2807  %conv = zext i1 %cmp to i32
2808  ret i32 %conv
2809}
2810
2811define i32 @fcmps_ueq_f128(fp128 %a, fp128 %b) #0 {
2812; CHECK-LABEL: fcmps_ueq_f128:
2813; CHECK:       // %bb.0:
2814; CHECK-NEXT:    sub sp, sp, #48
2815; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
2816; CHECK-NEXT:    .cfi_def_cfa_offset 48
2817; CHECK-NEXT:    .cfi_offset w19, -8
2818; CHECK-NEXT:    .cfi_offset w30, -16
2819; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
2820; CHECK-NEXT:    bl __eqtf2
2821; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
2822; CHECK-NEXT:    mov w19, w0
2823; CHECK-NEXT:    bl __unordtf2
2824; CHECK-NEXT:    cmp w0, #0
2825; CHECK-NEXT:    ccmp w19, #0, #4, eq
2826; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
2827; CHECK-NEXT:    cset w0, eq
2828; CHECK-NEXT:    add sp, sp, #48
2829; CHECK-NEXT:    ret
2830  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
2831  %conv = zext i1 %cmp to i32
2832  ret i32 %conv
2833}
2834
2835define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 {
2836; CHECK-LABEL: fcmps_une_f128:
2837; CHECK:       // %bb.0:
2838; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2839; CHECK-NEXT:    .cfi_def_cfa_offset 16
2840; CHECK-NEXT:    .cfi_offset w30, -16
2841; CHECK-NEXT:    bl __netf2
2842; CHECK-NEXT:    cmp w0, #0
2843; CHECK-NEXT:    cset w0, ne
2844; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2845; CHECK-NEXT:    ret
2846  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
2847  %conv = zext i1 %cmp to i32
2848  ret i32 %conv
2849}
2850
2851
2852; Intrinsics to convert between floating-point types
2853
2854define float @fptrunc_f32_f64(double %x) #0 {
2855; CHECK-LABEL: fptrunc_f32_f64:
2856; CHECK:       // %bb.0:
2857; CHECK-NEXT:    fcvt s0, d0
2858; CHECK-NEXT:    ret
2859  %val = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2860  ret float %val
2861}
2862
2863define float @fptrunc_f32_f128(fp128 %x) #0 {
2864; CHECK-LABEL: fptrunc_f32_f128:
2865; CHECK:       // %bb.0:
2866; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2867; CHECK-NEXT:    .cfi_def_cfa_offset 16
2868; CHECK-NEXT:    .cfi_offset w30, -16
2869; CHECK-NEXT:    bl __trunctfsf2
2870; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2871; CHECK-NEXT:    ret
2872  %val = call float @llvm.experimental.constrained.fptrunc.f32.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2873  ret float %val
2874}
2875
2876define double @fptrunc_f64_f128(fp128 %x) #0 {
2877; CHECK-LABEL: fptrunc_f64_f128:
2878; CHECK:       // %bb.0:
2879; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2880; CHECK-NEXT:    .cfi_def_cfa_offset 16
2881; CHECK-NEXT:    .cfi_offset w30, -16
2882; CHECK-NEXT:    bl __trunctfdf2
2883; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2884; CHECK-NEXT:    ret
2885  %val = call double @llvm.experimental.constrained.fptrunc.f64.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2886  ret double %val
2887}
2888
2889define double @fpext_f64_f32(float %x) #0 {
2890; CHECK-LABEL: fpext_f64_f32:
2891; CHECK:       // %bb.0:
2892; CHECK-NEXT:    fcvt d0, s0
2893; CHECK-NEXT:    ret
2894  %val = call double @llvm.experimental.constrained.fpext.f64.f32(float %x, metadata !"fpexcept.strict") #0
2895  ret double %val
2896}
2897
2898define fp128 @fpext_f128_f32(float %x) #0 {
2899; CHECK-LABEL: fpext_f128_f32:
2900; CHECK:       // %bb.0:
2901; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2902; CHECK-NEXT:    .cfi_def_cfa_offset 16
2903; CHECK-NEXT:    .cfi_offset w30, -16
2904; CHECK-NEXT:    bl __extendsftf2
2905; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2906; CHECK-NEXT:    ret
2907  %val = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %x, metadata !"fpexcept.strict") #0
2908  ret fp128 %val
2909}
2910
2911define fp128 @fpext_f128_f64(double %x) #0 {
2912; CHECK-LABEL: fpext_f128_f64:
2913; CHECK:       // %bb.0:
2914; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2915; CHECK-NEXT:    .cfi_def_cfa_offset 16
2916; CHECK-NEXT:    .cfi_offset w30, -16
2917; CHECK-NEXT:    bl __extenddftf2
2918; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2919; CHECK-NEXT:    ret
2920  %val = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %x, metadata !"fpexcept.strict") #0
2921  ret fp128 %val
2922}
2923
2924define <1 x double> @sin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
2925; CHECK-LABEL: sin_v1f64:
2926; CHECK:       // %bb.0:
2927; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2928; CHECK-NEXT:    .cfi_def_cfa_offset 16
2929; CHECK-NEXT:    .cfi_offset w30, -16
2930; CHECK-NEXT:    bl sin
2931; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2932; CHECK-NEXT:    ret
2933  %val = call <1 x double> @llvm.experimental.constrained.sin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2934  ret <1 x double> %val
2935}
2936
2937define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
2938; CHECK-LABEL: cos_v1f64:
2939; CHECK:       // %bb.0:
2940; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2941; CHECK-NEXT:    .cfi_def_cfa_offset 16
2942; CHECK-NEXT:    .cfi_offset w30, -16
2943; CHECK-NEXT:    bl cos
2944; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2945; CHECK-NEXT:    ret
2946  %val = call <1 x double> @llvm.experimental.constrained.cos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2947  ret <1 x double> %val
2948}
2949
2950define <1 x double> @tan_v1f64(<1 x double> %x, <1 x double> %y) #0 {
2951; CHECK-LABEL: tan_v1f64:
2952; CHECK:       // %bb.0:
2953; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
2954; CHECK-NEXT:    .cfi_def_cfa_offset 16
2955; CHECK-NEXT:    .cfi_offset w30, -16
2956; CHECK-NEXT:    bl tan
2957; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
2958; CHECK-NEXT:    ret
2959  %val = call <1 x double> @llvm.experimental.constrained.tan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
2960  ret <1 x double> %val
2961}
2962
; Strict-fp asin on <1 x double> lowers to a libcall ("bl asin"). %y is unused.
define <1 x double> @asin_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: asin_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl asin
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.asin.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
2975
; Strict-fp acos on <1 x double> lowers to a libcall ("bl acos"). %y is unused.
define <1 x double> @acos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: acos_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl acos
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.acos.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
2988
; Strict-fp atan on <1 x double> lowers to a libcall ("bl atan"). %y is unused.
define <1 x double> @atan_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: atan_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atan
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.atan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3001
; Strict-fp atan2 is a two-operand op: both %x and %y feed the libcall
; ("bl atan2").
define <1 x double> @atan2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: atan2_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl atan2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.atan2.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3014
; Strict-fp sinh on <1 x double> lowers to a libcall ("bl sinh"). %y is unused.
define <1 x double> @sinh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: sinh_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl sinh
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sinh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3027
; Strict-fp cosh on <1 x double> lowers to a libcall ("bl cosh"). %y is unused.
define <1 x double> @cosh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: cosh_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl cosh
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.cosh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3040
; Strict-fp tanh on <1 x double> lowers to a libcall ("bl tanh"). %y is unused.
define <1 x double> @tanh_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: tanh_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl tanh
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.tanh.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3053
; Strict-fp pow is a two-operand op: both %x and %y feed the libcall
; ("bl pow").
define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: pow_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl pow
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.pow.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3066
; Strict-fp log on <1 x double> lowers to a libcall ("bl log"). %y is unused.
define <1 x double> @log_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: log_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.log.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3079
; Strict-fp log2 on <1 x double> lowers to a libcall ("bl log2"). %y is unused.
define <1 x double> @log2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: log2_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.log2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3092
; Strict-fp log10 on <1 x double> lowers to a libcall ("bl log10"). %y is
; unused.
define <1 x double> @log10_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: log10_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl log10
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.log10.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3105
; Strict-fp exp on <1 x double> lowers to a libcall ("bl exp"). %y is unused.
define <1 x double> @exp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: exp_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl exp
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.exp.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3118
; Strict-fp exp2 on <1 x double> lowers to a libcall ("bl exp2"). %y is unused.
define <1 x double> @exp2_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: exp2_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl exp2
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.exp2.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}
3131
; #0 marks every test function and call site strictfp, so constrained-FP
; semantics (rounding-mode/exception metadata) must be honoured by lowering.
attributes #0 = { strictfp }
3133
3134declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
3135declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
3136declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
3137declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
3138declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
3139declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
3140declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
3141declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
3142declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
3143declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
3144declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
3145declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
3146declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
3147declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
3148declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
3149declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
3150declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
3151declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
3152declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
3153declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
3154declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
3155declare float @llvm.experimental.constrained.asin.f32(float, metadata, metadata)
3156declare float @llvm.experimental.constrained.acos.f32(float, metadata, metadata)
3157declare float @llvm.experimental.constrained.atan.f32(float, metadata, metadata)
3158declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata)
3159declare float @llvm.experimental.constrained.sinh.f32(float, metadata, metadata)
3160declare float @llvm.experimental.constrained.cosh.f32(float, metadata, metadata)
3161declare float @llvm.experimental.constrained.tanh.f32(float, metadata, metadata)
3162declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
3163declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
3164declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
3165declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
3166declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
3167declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
3168declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
3169declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
3170declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
3171declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
3172declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
3173declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
3174declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata)
3175declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata)
3176declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
3177declare float @llvm.experimental.constrained.floor.f32(float, metadata)
3178declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
3179declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
3180declare float @llvm.experimental.constrained.round.f32(float, metadata)
3181declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
3182declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
3183declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
3184declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
3185
3186declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
3187declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
3188declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
3189declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
3190declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
3191declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
3192declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
3193declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
3194declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
3195declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
3196declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
3197declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
3198declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
3199declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
3200declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
3201declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
3202declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
3203declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
3204declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
3205declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
3206declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
3207declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata)
3208declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata)
3209declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata)
3210declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata)
3211declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata)
3212declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata)
3213declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata)
3214declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
3215declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
3216declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
3217declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
3218declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
3219declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
3220declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
3221declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
3222declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
3223declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
3224declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
3225declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
3226declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
3227declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
3228declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
3229declare double @llvm.experimental.constrained.floor.f64(double, metadata)
3230declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
3231declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
3232declare double @llvm.experimental.constrained.round.f64(double, metadata)
3233declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)
3234declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
3235declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
3236declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
3237
3238declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
3239declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
3240declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
3241declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
3242declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata)
3243declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata)
3244declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
3245declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
3246declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
3247declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
3248declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
3249declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
3250declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
3251declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
3252declare fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128, metadata, metadata)
3253declare fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128, metadata, metadata)
3254declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
3255declare fp128 @llvm.experimental.constrained.powi.f128(fp128, i32, metadata, metadata)
3256declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)
3257declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)
3258declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
3259declare fp128 @llvm.experimental.constrained.asin.f128(fp128, metadata, metadata)
3260declare fp128 @llvm.experimental.constrained.acos.f128(fp128, metadata, metadata)
3261declare fp128 @llvm.experimental.constrained.atan.f128(fp128, metadata, metadata)
3262declare fp128 @llvm.experimental.constrained.atan2.f128(fp128, fp128, metadata, metadata)
3263declare fp128 @llvm.experimental.constrained.sinh.f128(fp128, metadata, metadata)
3264declare fp128 @llvm.experimental.constrained.cosh.f128(fp128, metadata, metadata)
3265declare fp128 @llvm.experimental.constrained.tanh.f128(fp128, metadata, metadata)
3266declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)
3267declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)
3268declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)
3269declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata)
3270declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)
3271declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)
3272declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
3273declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
3274declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
3275declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
3276declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)
3277declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)
3278declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)
3279declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
3280declare i32 @llvm.experimental.constrained.lround.i32.f128(fp128, metadata)
3281declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)
3282declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
3283declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
3284declare i1 @llvm.experimental.constrained.fcmps.f128(fp128, fp128, metadata, metadata)
3285declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)
3286
3287declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
3288declare float @llvm.experimental.constrained.fptrunc.f32.f128(fp128, metadata, metadata)
3289declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata)
3290declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
3291declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
3292declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
3293
3294;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
3295; CHECK-GI: {{.*}}
3296; CHECK-SD: {{.*}}
3297