; xref: /llvm-project/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll (revision 1bc9de247477b58a14547a31047d1c9a365e2d5d)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
; RUN:   | FileCheck -check-prefix=RV64I %s

declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)

; Strict sqrt selects fsqrt.d with +d (fa-regs) or +zdinx (x-regs); the
; soft-float RV32I/RV64I configurations call the sqrt libcall instead.
define double @sqrt_f64(double %a) nounwind strictfp {
; CHECKIFD-LABEL: sqrt_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fsqrt.d a0, a0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: sqrt_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: sqrt_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sqrt_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)

; powi always lowers to the __powidf2 libcall; on RV64 the i32 power operand
; is sign-extended (sext.w) first to satisfy the calling convention.
define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV32IFD-LABEL: powi_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call __powidf2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: powi_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sext.w a0, a0
; RV64IFD-NEXT:    call __powidf2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: powi_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call __powidf2
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: powi_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    sext.w a1, a1
; RV64IZFINXZDINX-NEXT:    call __powidf2
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: powi_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __powidf2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: powi_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __powidf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.powi.f64(double %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)

; There is no RISC-V sin instruction; every configuration calls the sin libcall.
define double @sin_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: sin_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sin_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: sin_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call sin
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: sin_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call sin
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: sin_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sin_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sin.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)

; There is no RISC-V cos instruction; every configuration calls the cos libcall.
define double @cos_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: cos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: cos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: cos_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call cos
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: cos_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call cos
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: cos_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call cos
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cos_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call cos
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.cos.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; The sin+cos combination results in an FSINCOS SelectionDAG node.
; The input is kept live across the sin call in a callee-saved register
; (fs0 / s0-s1) so it can be re-used for the cos call.
define double @sincos_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: sincos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sincos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fmv.d fs0, fa0
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: sincos_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    mv s0, a1
; RV32IZFINXZDINX-NEXT:    mv s1, a0
; RV32IZFINXZDINX-NEXT:    call sin
; RV32IZFINXZDINX-NEXT:    mv s2, a0
; RV32IZFINXZDINX-NEXT:    mv s3, a1
; RV32IZFINXZDINX-NEXT:    mv a0, s1
; RV32IZFINXZDINX-NEXT:    mv a1, s0
; RV32IZFINXZDINX-NEXT:    call cos
; RV32IZFINXZDINX-NEXT:    fadd.d a0, s2, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: sincos_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV64IZFINXZDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    mv s0, a0
; RV64IZFINXZDINX-NEXT:    call sin
; RV64IZFINXZDINX-NEXT:    mv s1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, s0
; RV64IZFINXZDINX-NEXT:    call cos
; RV64IZFINXZDINX-NEXT:    fadd.d a0, s1, a0
; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: sincos_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call sin
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv s3, a1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call cos
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a0, s2
; RV32I-NEXT:    mv a1, s3
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sincos_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    call sin
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call cos
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sin.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %2 = call double @llvm.experimental.constrained.cos.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %3 = fadd double %1, %2
  ret double %3
}

declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)

; Strict tan always lowers to the tan libcall.
define double @tan_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: tan_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call tan
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: tan_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call tan
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: tan_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call tan
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: tan_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call tan
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: tan_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call tan
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: tan_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call tan
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.tan.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict asin always lowers to the asin libcall.
define double @asin_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: asin_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call asin
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: asin_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call asin
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: asin_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call asin
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: asin_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call asin
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: asin_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call asin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: asin_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call asin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.asin.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict acos always lowers to the acos libcall.
define double @acos_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: acos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call acos
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: acos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call acos
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: acos_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call acos
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: acos_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call acos
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: acos_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call acos
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: acos_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call acos
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.acos.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict atan always lowers to the atan libcall.
define double @atan_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: atan_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call atan
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: atan_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call atan
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: atan_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call atan
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: atan_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call atan
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: atan_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call atan
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: atan_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call atan
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.atan.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

declare double @llvm.experimental.constrained.atan2.f64(double, double, metadata, metadata)

; Strict atan2 always lowers to the atan2 libcall.
define double @atan2_f64(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: atan2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call atan2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: atan2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call atan2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: atan2_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call atan2
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: atan2_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call atan2
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: atan2_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call atan2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: atan2_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call atan2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.atan2.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict sinh always lowers to the sinh libcall.
define double @sinh_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: sinh_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call sinh
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sinh_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call sinh
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: sinh_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call sinh
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: sinh_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call sinh
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: sinh_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sinh
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sinh_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sinh
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sinh.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict cosh always lowers to the cosh libcall.
define double @cosh_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: cosh_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call cosh
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: cosh_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call cosh
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: cosh_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call cosh
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: cosh_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call cosh
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: cosh_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call cosh
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cosh_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call cosh
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.cosh.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Strict tanh always lowers to the tanh libcall.
define double @tanh_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: tanh_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call tanh
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: tanh_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call tanh
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: tanh_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call tanh
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: tanh_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call tanh
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: tanh_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call tanh
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: tanh_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call tanh
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.tanh.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

846declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
847
; Constrained pow lowers to a call to the pow libcall on every tested config
; (check lines autogenerated).
define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: pow_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call pow
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: pow_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call pow
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: pow_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call pow
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: pow_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call pow
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: pow_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call pow
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: pow_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call pow
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.pow.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
905
906declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
907
; Constrained exp lowers to a call to the exp libcall on every tested config
; (check lines autogenerated).
define double @exp_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: exp_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: exp_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call exp
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: exp_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call exp
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: exp_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.exp.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
965
966declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
967
; Constrained exp2 lowers to a call to the exp2 libcall on every tested config
; (check lines autogenerated).
define double @exp2_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: exp2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: exp2_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call exp2
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: exp2_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call exp2
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: exp2_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp2_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.exp2.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1025
1026declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
1027
; Constrained log lowers to a call to the log libcall on every tested config
; (check lines autogenerated).
define double @log_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: log_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: log_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call log
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: log_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call log
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: log_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.log.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1085
1086declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
1087
; Constrained log10 lowers to a call to the log10 libcall on every tested
; config (check lines autogenerated).
define double @log10_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: log10_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log10
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log10_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log10
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: log10_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call log10
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: log10_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call log10
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: log10_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log10
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log10_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log10
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.log10.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1145
1146declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
1147
; Constrained log2 lowers to a call to the log2 libcall on every tested config
; (check lines autogenerated).
define double @log2_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: log2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: log2_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call log2
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: log2_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call log2
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: log2_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log2_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.log2.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1205
1206declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
1207
; Constrained fma selects a single fmadd.d on the hard-float (D and Zdinx)
; configs and falls back to the fma libcall on soft-float RV32I/RV64I
; (check lines autogenerated).
define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
; CHECKIFD-LABEL: fma_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fma_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fma_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fma_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1244
1245declare double @llvm.experimental.constrained.fmuladd.f64(double, double, double, metadata, metadata)
1246
; Constrained fmuladd selects fmadd.d on the hard-float (D and Zdinx) configs;
; on soft-float RV32I/RV64I it is expanded into separate __muldf3 and __adddf3
; libcalls rather than calling fma (check lines autogenerated).
define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
; CHECKIFD-LABEL: fmuladd_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmuladd_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a5
; RV32I-NEXT:    mv s1, a4
; RV32I-NEXT:    call __muldf3
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmuladd_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    call __muldf3
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fmuladd.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1297
1298declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
1299
; Constrained minnum lowers to a call to the fmin libcall on every tested
; config (check lines autogenerated).
define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: minnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call fmin
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: minnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call fmin
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: minnum_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call fmin
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: minnum_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call fmin
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: minnum_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: minnum_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1357
1358declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
1359
; Constrained maxnum lowers to a call to the fmax libcall on every tested
; config (check lines autogenerated).
define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: maxnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call fmax
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: maxnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call fmax
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: maxnum_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call fmax
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: maxnum_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call fmax
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: maxnum_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmax
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: maxnum_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmax
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1417
1418; TODO: FMINNAN and FMAXNAN aren't handled in
1419; SelectionDAGLegalize::ExpandNode.
1420
1421; declare double @llvm.experimental.constrained.minimum.f64(double, double, metadata)
1422
1423; define double @fminimum_f64(double %a, double %b) nounwind strictfp {
1424;   %1 = call double @llvm.experimental.constrained.minimum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
1425;   ret double %1
1426; }
1427
1428; declare double @llvm.experimental.constrained.maximum.f64(double, double, metadata)
1429
1430; define double @fmaximum_f64(double %a, double %b) nounwind strictfp {
1431;   %1 = call double @llvm.experimental.constrained.maximum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
1432;   ret double %1
1433; }
1434
1435declare double @llvm.experimental.constrained.floor.f64(double, metadata)
1436
; Constrained floor lowers to a call to the floor libcall on every tested
; config (check lines autogenerated).
define double @floor_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: floor_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call floor
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: floor_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call floor
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: floor_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call floor
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: floor_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call floor
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: floor_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call floor
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.floor.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1494
1495declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
1496
; Constrained ceil lowers to a call to the ceil libcall on every tested config
; (check lines autogenerated).
define double @ceil_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call ceil
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: ceil_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call ceil
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: ceil_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call ceil
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: ceil_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call ceil
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: ceil_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call ceil
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.ceil.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1554
1555declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
1556
; Constrained trunc lowers to a call to the trunc libcall on every tested
; config (check lines autogenerated).
define double @trunc_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call trunc
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: trunc_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call trunc
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: trunc_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call trunc
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: trunc_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call trunc
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: trunc_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call trunc
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.trunc.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1614
1615declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
1616
; Constrained rint lowers to a call to the rint libcall on every tested config
; (check lines autogenerated).
define double @rint_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: rint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call rint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: rint_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call rint
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: rint_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call rint
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: rint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call rint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call rint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.rint.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1674
; Strict constrained nearbyint (dynamic rounding, strict exceptions): every
; tested configuration lowers this to a libcall to nearbyint().
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)

define double @nearbyint_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: nearbyint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call nearbyint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: nearbyint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call nearbyint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: nearbyint_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call nearbyint
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: nearbyint_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call nearbyint
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: nearbyint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call nearbyint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: nearbyint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call nearbyint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.nearbyint.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
1734
; Strict constrained round. round's result does not depend on the rounding
; mode, so the intrinsic takes only the exception-behavior metadata operand;
; every tested configuration lowers it to a libcall to round().
declare double @llvm.experimental.constrained.round.f64(double, metadata)

define double @round_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: round_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: round_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call round
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: round_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: round_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call round
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: round_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call round
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: round_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call round
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.round.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1794
; Strict constrained roundeven (rounding-mode independent, so a single
; exception-behavior metadata operand): every tested configuration lowers this
; to a libcall to roundeven().
declare double @llvm.experimental.constrained.roundeven.f64(double, metadata)

define double @roundeven_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: roundeven_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: roundeven_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call roundeven
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: roundeven_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: roundeven_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call roundeven
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: roundeven_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call roundeven
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: roundeven_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call roundeven
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.roundeven.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret double %1
}
1854
; Strict constrained lrint: with hardware FP (D or Zdinx), this selects a
; single fcvt.w.d (RV32) / fcvt.l.d (RV64) with no static rounding-mode
; operand, so the conversion uses the dynamic rounding mode from frm.
; Soft-float configurations call the lrint() libcall instead.
declare iXLen @llvm.experimental.constrained.lrint.iXLen.f64(double, metadata, metadata)

define iXLen @lrint_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: lrint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: lrint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: lrint_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: lrint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call lrint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: lrint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call lrint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call iXLen @llvm.experimental.constrained.lrint.iXLen.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret iXLen %1
}
1898
; Strict constrained lround: with hardware FP this selects fcvt.w.d/fcvt.l.d
; with the static rmm rounding mode (round-to-nearest, ties away from zero),
; matching lround's ties-away semantics. Soft-float configurations call the
; lround() libcall.
declare iXLen @llvm.experimental.constrained.lround.iXLen.f64(double, metadata)

define iXLen @lround_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: lround_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: lround_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: lround_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: lround_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call lround
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: lround_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call lround
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call iXLen @llvm.experimental.constrained.lround.iXLen.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret iXLen %1
}
1942
; Strict constrained llrint returns a fixed i64: on RV32 (even with D/Zdinx)
; the 64-bit result forces a libcall to llrint(), while RV64 with hardware FP
; selects a single fcvt.l.d using the dynamic rounding mode.
declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)

define i64 @llrint_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: llrint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call llrint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: llrint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: llrint_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call llrint
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: llrint_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: llrint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call llrint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: llrint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call llrint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
1994
; Strict constrained llround (i64 result): RV32 configurations libcall
; llround(); RV64 with hardware FP selects fcvt.l.d with the static rmm
; (ties-away) rounding mode.
declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)

define i64 @llround_f64(double %a) nounwind strictfp {
; RV32IFD-LABEL: llround_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call llround
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: llround_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: llround_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call llround
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: llround_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: llround_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call llround
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: llround_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call llround
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.llround.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
2046
; Strict constrained ldexp: every tested configuration lowers this to a
; libcall to ldexp().
; NOTE(review): unlike the other tests in this file, this define carries only
; `nounwind` (no `strictfp` function attribute) while the call site is
; strictfp — confirm this is intentional.
define double @ldexp_f64(double %x, i32 signext %y) nounwind {
; RV32IFD-LABEL: ldexp_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ldexp
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ldexp_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call ldexp
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: ldexp_f64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call ldexp
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: ldexp_f64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call ldexp
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: ldexp_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call ldexp
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: ldexp_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call ldexp
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %z = call double @llvm.experimental.constrained.ldexp.f64.i32(double %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %z
}
2104