xref: /llvm-project/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll (revision 537e0e1ff639ed4f8fa4dadbc84f4a6a12e1d20a)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d \
3; RUN:   -target-abi=ilp32d \
4; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
5; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d \
6; RUN:   -target-abi=lp64d \
7; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
8; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel  \
9; RUN:   | FileCheck -check-prefix=RV32I %s
10; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel  \
11; RUN:   | FileCheck -check-prefix=RV64I %s
12
13declare double @llvm.sqrt.f64(double)
14
; sqrt selects the single fsqrt.d instruction when the D extension is
; available; soft-float targets fall back to a sqrt libcall.
define double @sqrt_f64(double %a) nounwind {
; CHECKIFD-LABEL: sqrt_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: sqrt_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sqrt_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}
41
; powi always lowers to the compiler-rt routine __powidf2 (there is no libm
; entry point taking double + i32). On RV64 the i32 exponent is sign-extended
; with sext.w before the call to satisfy the calling convention.
define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-LABEL: powi_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call __powidf2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: powi_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sext.w a0, a0
; RV64IFD-NEXT:    call __powidf2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: powi_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __powidf2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: powi_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __powidf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.powi.f64.i32(double %a, i32 %b)
  ret double %1
}
83
84declare double @llvm.sin.f64(double)
85
; sin has no hardware lowering; every configuration emits a tail of the sin
; libcall wrapped in a minimal ra spill/reload frame.
define double @sin_f64(double %a) nounwind {
; RV32IFD-LABEL: sin_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sin_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: sin_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sin_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}
125
126declare double @llvm.cos.f64(double)
127
; cos mirrors the sin case above: a plain cos libcall on every configuration.
define double @cos_f64(double %a) nounwind {
; RV32IFD-LABEL: cos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: cos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: cos_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call cos
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cos_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call cos
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}
167
168; The sin+cos combination results in an FSINCOS SelectionDAG node.
; %a must stay live across the first libcall and the first result across the
; second, so hard-float configs park them in callee-saved FPRs (fs0/fs1) and
; soft-float configs in callee-saved GPRs (s0-s3 on RV32 for the two-register
; double). Soft-float also performs the final addition via __adddf3.
define double @sincos_f64(double %a) nounwind {
; RV32IFD-LABEL: sincos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    fadd.d fa0, fs1, fa0
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sincos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fmv.d fs0, fa0
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    fadd.d fa0, fs1, fa0
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: sincos_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    call sin
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv s3, a1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    call cos
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    mv a3, a1
; RV32I-NEXT:    mv a0, s2
; RV32I-NEXT:    mv a1, s3
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sincos_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    call sin
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call cos
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  %2 = call double @llvm.cos.f64(double %a)
  %3 = fadd double %1, %2
  ret double %3
}
259
260declare double @llvm.pow.f64(double, double)
261
; pow always lowers to the pow libcall; both double arguments are already in
; their ABI locations, so only the ra spill/reload frame is emitted.
define double @pow_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: pow_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call pow
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: pow_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call pow
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: pow_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call pow
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: pow_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call pow
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}
301
302declare double @llvm.exp.f64(double)
303
; exp always lowers to the exp libcall on every configuration.
define double @exp_f64(double %a) nounwind {
; RV32IFD-LABEL: exp_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: exp_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}
343
344declare double @llvm.exp2.f64(double)
345
; exp2 always lowers to the exp2 libcall on every configuration.
define double @exp2_f64(double %a) nounwind {
; RV32IFD-LABEL: exp2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: exp2_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp2_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}
385
; exp10 always lowers to the exp10 libcall on every configuration.
define double @exp10_f64(double %a) nounwind {
; RV32IFD-LABEL: exp10_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: exp10_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call exp10
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: exp10_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call exp10
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.exp10.f64(double %a)
  ret double %1
}
425
426declare double @llvm.log.f64(double)
427
; log always lowers to the log libcall on every configuration.
define double @log_f64(double %a) nounwind {
; RV32IFD-LABEL: log_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: log_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}
467
468declare double @llvm.log10.f64(double)
469
; log10 always lowers to the log10 libcall on every configuration.
define double @log10_f64(double %a) nounwind {
; RV32IFD-LABEL: log10_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log10
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log10_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log10
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: log10_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log10
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log10_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log10
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}
509
510declare double @llvm.log2.f64(double)
511
; log2 always lowers to the log2 libcall on every configuration.
define double @log2_f64(double %a) nounwind {
; RV32IFD-LABEL: log2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log2
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log2
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: log2_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call log2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: log2_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call log2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}
551
552declare double @llvm.fma.f64(double, double, double)
553
; fma selects the single fused fmadd.d instruction with the D extension;
; soft-float targets call the fma libcall (still a fused operation).
define double @fma_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fma_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fma_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fma_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
580
581declare double @llvm.fmuladd.f64(double, double, double)
582
; fmuladd may fuse or not: with the D extension it becomes fmadd.d, while
; soft-float targets expand it to separate __muldf3 then __adddf3 calls
; (unfused), keeping %c live across the multiply in callee-saved registers.
define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fmuladd_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fmuladd_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a4
; RV32I-NEXT:    mv s1, a5
; RV32I-NEXT:    call __muldf3
; RV32I-NEXT:    mv a2, s0
; RV32I-NEXT:    mv a3, s1
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmuladd_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    call __muldf3
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}
623
624declare double @llvm.fabs.f64(double)
625
; fabs needs no libcall anywhere: fabs.d with the D extension, otherwise a
; shift-left/shift-right pair that clears the IEEE sign bit. On RV32 the sign
; bit lives in the high word (a1); the low word (a0) passes through untouched.
define double @fabs_f64(double %a) nounwind {
; CHECKIFD-LABEL: fabs_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fabs.d fa0, fa0
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fabs_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a1, 1
; RV32I-NEXT:    srli a1, a1, 1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fabs_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 1
; RV64I-NEXT:    srli a0, a0, 1
; RV64I-NEXT:    ret
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}
646
647declare double @llvm.minnum.f64(double, double)
648
; minnum selects fmin.d with the D extension; soft-float targets call the
; fmin libcall.
define double @minnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: minnum_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmin.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: minnum_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: minnum_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}
675
676declare double @llvm.maxnum.f64(double, double)
677
; maxnum selects fmax.d with the D extension; soft-float targets call the
; fmax libcall.
define double @maxnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: maxnum_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmax.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: maxnum_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmax
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: maxnum_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmax
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}
704
705declare double @llvm.copysign.f64(double, double)
706
; copysign is pure bit manipulation: fsgnj.d with the D extension; otherwise
; the magnitude of %a (sign cleared via shift pair) is OR-ed with the isolated
; sign bit of %b — taken from b's high word (a3) on RV32.
define double @copysign_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: copysign_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsgnj.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: copysign_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    slli a1, a1, 1
; RV32I-NEXT:    srli a1, a1, 1
; RV32I-NEXT:    and a2, a3, a2
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: copysign_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 1
; RV64I-NEXT:    srli a1, a1, 63
; RV64I-NEXT:    srli a0, a0, 1
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}
733
734declare double @llvm.floor.f64(double)
735
; floor always lowers to the floor libcall on every configuration.
define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call floor
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: floor_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call floor
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: floor_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call floor
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}
775
776declare double @llvm.ceil.f64(double)
777
; ceil always lowers to the ceil libcall on every configuration.
define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call ceil
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: ceil_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call ceil
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: ceil_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call ceil
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}
817
818declare double @llvm.trunc.f64(double)
819
; trunc always lowers to the trunc libcall on every configuration.
define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call trunc
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: trunc_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call trunc
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: trunc_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call trunc
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}
859
860declare double @llvm.rint.f64(double)
861
; rint always lowers to the rint libcall on every configuration.
define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call rint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: rint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call rint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: rint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call rint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}
901
902declare double @llvm.nearbyint.f64(double)
903
; nearbyint always lowers to the nearbyint libcall on every configuration.
define double @nearbyint_f64(double %a) nounwind {
; RV32IFD-LABEL: nearbyint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call nearbyint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: nearbyint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call nearbyint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32I-LABEL: nearbyint_f64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call nearbyint
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: nearbyint_f64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call nearbyint
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}
943
944declare double @llvm.round.f64(double)
945
; llvm.round.f64 gets no direct D-extension lowering under GlobalISel:
; every configuration emits a libcall to round.
946define double @round_f64(double %a) nounwind {
947; RV32IFD-LABEL: round_f64:
948; RV32IFD:       # %bb.0:
949; RV32IFD-NEXT:    addi sp, sp, -16
950; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
951; RV32IFD-NEXT:    call round
952; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
953; RV32IFD-NEXT:    addi sp, sp, 16
954; RV32IFD-NEXT:    ret
955;
956; RV64IFD-LABEL: round_f64:
957; RV64IFD:       # %bb.0:
958; RV64IFD-NEXT:    addi sp, sp, -16
959; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
960; RV64IFD-NEXT:    call round
961; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
962; RV64IFD-NEXT:    addi sp, sp, 16
963; RV64IFD-NEXT:    ret
964;
965; RV32I-LABEL: round_f64:
966; RV32I:       # %bb.0:
967; RV32I-NEXT:    addi sp, sp, -16
968; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
969; RV32I-NEXT:    call round
970; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
971; RV32I-NEXT:    addi sp, sp, 16
972; RV32I-NEXT:    ret
973;
974; RV64I-LABEL: round_f64:
975; RV64I:       # %bb.0:
976; RV64I-NEXT:    addi sp, sp, -16
977; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
978; RV64I-NEXT:    call round
979; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
980; RV64I-NEXT:    addi sp, sp, 16
981; RV64I-NEXT:    ret
982  %1 = call double @llvm.round.f64(double %a)
983  ret double %1
984}
985
986declare double @llvm.roundeven.f64(double)
987
; Like round above, llvm.roundeven.f64 lowers to a libcall (roundeven)
; in all four configurations, with or without +d.
988define double @roundeven_f64(double %a) nounwind {
989; RV32IFD-LABEL: roundeven_f64:
990; RV32IFD:       # %bb.0:
991; RV32IFD-NEXT:    addi sp, sp, -16
992; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
993; RV32IFD-NEXT:    call roundeven
994; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
995; RV32IFD-NEXT:    addi sp, sp, 16
996; RV32IFD-NEXT:    ret
997;
998; RV64IFD-LABEL: roundeven_f64:
999; RV64IFD:       # %bb.0:
1000; RV64IFD-NEXT:    addi sp, sp, -16
1001; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1002; RV64IFD-NEXT:    call roundeven
1003; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1004; RV64IFD-NEXT:    addi sp, sp, 16
1005; RV64IFD-NEXT:    ret
1006;
1007; RV32I-LABEL: roundeven_f64:
1008; RV32I:       # %bb.0:
1009; RV32I-NEXT:    addi sp, sp, -16
1010; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1011; RV32I-NEXT:    call roundeven
1012; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1013; RV32I-NEXT:    addi sp, sp, 16
1014; RV32I-NEXT:    ret
1015;
1016; RV64I-LABEL: roundeven_f64:
1017; RV64I:       # %bb.0:
1018; RV64I-NEXT:    addi sp, sp, -16
1019; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1020; RV64I-NEXT:    call roundeven
1021; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1022; RV64I-NEXT:    addi sp, sp, 16
1023; RV64I-NEXT:    ret
1024  %1 = call double @llvm.roundeven.f64(double %a)
1025  ret double %1
1026}
1027
1028declare i1 @llvm.is.fpclass.f64(double, i32)
; Test mask 3 = NaN (signaling | quiet).
; With +d this folds to a single fclass.d plus "andi ..., 768" and snez;
; NOTE(review): 768 (0x300) presumed to be the quiet/signaling-NaN class
; bits of the fclass result -- confirm against the F/D-extension spec.
; The soft-float versions clear the sign bit (slli/srli by 1) and compare
; against the exponent-all-ones pattern (lui 524032 = 0x7FF00000 on RV32;
; 2047 << 52 on RV64): strictly greater means NaN, and on RV32 an exactly
; equal high word is a NaN iff the low word (a0) is nonzero.
1029define i1 @isnan_d_fpclass(double %x) {
1030; CHECKIFD-LABEL: isnan_d_fpclass:
1031; CHECKIFD:       # %bb.0:
1032; CHECKIFD-NEXT:    fclass.d a0, fa0
1033; CHECKIFD-NEXT:    andi a0, a0, 768
1034; CHECKIFD-NEXT:    snez a0, a0
1035; CHECKIFD-NEXT:    ret
1036;
1037; RV32I-LABEL: isnan_d_fpclass:
1038; RV32I:       # %bb.0:
1039; RV32I-NEXT:    lui a2, 524032
1040; RV32I-NEXT:    slli a1, a1, 1
1041; RV32I-NEXT:    srli a1, a1, 1
1042; RV32I-NEXT:    beq a1, a2, .LBB25_2
1043; RV32I-NEXT:  # %bb.1:
1044; RV32I-NEXT:    sltu a0, a2, a1
1045; RV32I-NEXT:    ret
1046; RV32I-NEXT:  .LBB25_2:
1047; RV32I-NEXT:    snez a0, a0
1048; RV32I-NEXT:    ret
1049;
1050; RV64I-LABEL: isnan_d_fpclass:
1051; RV64I:       # %bb.0:
1052; RV64I-NEXT:    li a1, 2047
1053; RV64I-NEXT:    slli a0, a0, 1
1054; RV64I-NEXT:    slli a1, a1, 52
1055; RV64I-NEXT:    srli a0, a0, 1
1056; RV64I-NEXT:    sltu a0, a1, a0
1057; RV64I-NEXT:    ret
1058  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3)  ; nan
1059  ret i1 %1
1060}
1061
; llvm.tan.f64 lowers to a libcall to tan in all four configurations.
1062define double @tan_f64(double %a) nounwind {
1063; RV32IFD-LABEL: tan_f64:
1064; RV32IFD:       # %bb.0:
1065; RV32IFD-NEXT:    addi sp, sp, -16
1066; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1067; RV32IFD-NEXT:    call tan
1068; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1069; RV32IFD-NEXT:    addi sp, sp, 16
1070; RV32IFD-NEXT:    ret
1071;
1072; RV64IFD-LABEL: tan_f64:
1073; RV64IFD:       # %bb.0:
1074; RV64IFD-NEXT:    addi sp, sp, -16
1075; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1076; RV64IFD-NEXT:    call tan
1077; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1078; RV64IFD-NEXT:    addi sp, sp, 16
1079; RV64IFD-NEXT:    ret
1080;
1081; RV32I-LABEL: tan_f64:
1082; RV32I:       # %bb.0:
1083; RV32I-NEXT:    addi sp, sp, -16
1084; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1085; RV32I-NEXT:    call tan
1086; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1087; RV32I-NEXT:    addi sp, sp, 16
1088; RV32I-NEXT:    ret
1089;
1090; RV64I-LABEL: tan_f64:
1091; RV64I:       # %bb.0:
1092; RV64I-NEXT:    addi sp, sp, -16
1093; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1094; RV64I-NEXT:    call tan
1095; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1096; RV64I-NEXT:    addi sp, sp, 16
1097; RV64I-NEXT:    ret
1098  %1 = call double @llvm.tan.f64(double %a)
1099  ret double %1
1100}
1101
; llvm.ldexp.f64.i32 lowers to a libcall to ldexp. On RV64 the i32
; exponent argument is first sign-extended with sext.w: in a0 for lp64d
; (the double travels in fa0) and in a1 for the soft-float ABI (where the
; double occupies a0). On RV32 the i32 already fills a register, so no
; extension is emitted.
1102define double @ldexp_double(double %x, i32 %y) nounwind {
1103; RV32IFD-LABEL: ldexp_double:
1104; RV32IFD:       # %bb.0:
1105; RV32IFD-NEXT:    addi sp, sp, -16
1106; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1107; RV32IFD-NEXT:    call ldexp
1108; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1109; RV32IFD-NEXT:    addi sp, sp, 16
1110; RV32IFD-NEXT:    ret
1111;
1112; RV64IFD-LABEL: ldexp_double:
1113; RV64IFD:       # %bb.0:
1114; RV64IFD-NEXT:    addi sp, sp, -16
1115; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1116; RV64IFD-NEXT:    sext.w a0, a0
1117; RV64IFD-NEXT:    call ldexp
1118; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1119; RV64IFD-NEXT:    addi sp, sp, 16
1120; RV64IFD-NEXT:    ret
1121;
1122; RV32I-LABEL: ldexp_double:
1123; RV32I:       # %bb.0:
1124; RV32I-NEXT:    addi sp, sp, -16
1125; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1126; RV32I-NEXT:    call ldexp
1127; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1128; RV32I-NEXT:    addi sp, sp, 16
1129; RV32I-NEXT:    ret
1130;
1131; RV64I-LABEL: ldexp_double:
1132; RV64I:       # %bb.0:
1133; RV64I-NEXT:    addi sp, sp, -16
1134; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1135; RV64I-NEXT:    sext.w a1, a1
1136; RV64I-NEXT:    call ldexp
1137; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1138; RV64I-NEXT:    addi sp, sp, 16
1139; RV64I-NEXT:    ret
1140  %z = call double @llvm.ldexp.f64.i32(double %x, i32 %y)
1141  ret double %z
1142}
1143
; llvm.asin.f64 lowers to a libcall to asin in all four configurations.
1144define double @asin_f64(double %a) nounwind {
1145; RV32IFD-LABEL: asin_f64:
1146; RV32IFD:       # %bb.0:
1147; RV32IFD-NEXT:    addi sp, sp, -16
1148; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1149; RV32IFD-NEXT:    call asin
1150; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1151; RV32IFD-NEXT:    addi sp, sp, 16
1152; RV32IFD-NEXT:    ret
1153;
1154; RV64IFD-LABEL: asin_f64:
1155; RV64IFD:       # %bb.0:
1156; RV64IFD-NEXT:    addi sp, sp, -16
1157; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1158; RV64IFD-NEXT:    call asin
1159; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1160; RV64IFD-NEXT:    addi sp, sp, 16
1161; RV64IFD-NEXT:    ret
1162;
1163; RV32I-LABEL: asin_f64:
1164; RV32I:       # %bb.0:
1165; RV32I-NEXT:    addi sp, sp, -16
1166; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1167; RV32I-NEXT:    call asin
1168; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1169; RV32I-NEXT:    addi sp, sp, 16
1170; RV32I-NEXT:    ret
1171;
1172; RV64I-LABEL: asin_f64:
1173; RV64I:       # %bb.0:
1174; RV64I-NEXT:    addi sp, sp, -16
1175; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1176; RV64I-NEXT:    call asin
1177; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1178; RV64I-NEXT:    addi sp, sp, 16
1179; RV64I-NEXT:    ret
1180  %1 = call double @llvm.asin.f64(double %a)
1181  ret double %1
1182}
1183
; llvm.acos.f64 lowers to a libcall to acos in all four configurations.
1184define double @acos_f64(double %a) nounwind {
1185; RV32IFD-LABEL: acos_f64:
1186; RV32IFD:       # %bb.0:
1187; RV32IFD-NEXT:    addi sp, sp, -16
1188; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1189; RV32IFD-NEXT:    call acos
1190; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1191; RV32IFD-NEXT:    addi sp, sp, 16
1192; RV32IFD-NEXT:    ret
1193;
1194; RV64IFD-LABEL: acos_f64:
1195; RV64IFD:       # %bb.0:
1196; RV64IFD-NEXT:    addi sp, sp, -16
1197; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1198; RV64IFD-NEXT:    call acos
1199; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1200; RV64IFD-NEXT:    addi sp, sp, 16
1201; RV64IFD-NEXT:    ret
1202;
1203; RV32I-LABEL: acos_f64:
1204; RV32I:       # %bb.0:
1205; RV32I-NEXT:    addi sp, sp, -16
1206; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1207; RV32I-NEXT:    call acos
1208; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1209; RV32I-NEXT:    addi sp, sp, 16
1210; RV32I-NEXT:    ret
1211;
1212; RV64I-LABEL: acos_f64:
1213; RV64I:       # %bb.0:
1214; RV64I-NEXT:    addi sp, sp, -16
1215; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1216; RV64I-NEXT:    call acos
1217; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1218; RV64I-NEXT:    addi sp, sp, 16
1219; RV64I-NEXT:    ret
1220  %1 = call double @llvm.acos.f64(double %a)
1221  ret double %1
1222}
1223
; llvm.atan.f64 lowers to a libcall to atan in all four configurations.
1224define double @atan_f64(double %a) nounwind {
1225; RV32IFD-LABEL: atan_f64:
1226; RV32IFD:       # %bb.0:
1227; RV32IFD-NEXT:    addi sp, sp, -16
1228; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1229; RV32IFD-NEXT:    call atan
1230; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1231; RV32IFD-NEXT:    addi sp, sp, 16
1232; RV32IFD-NEXT:    ret
1233;
1234; RV64IFD-LABEL: atan_f64:
1235; RV64IFD:       # %bb.0:
1236; RV64IFD-NEXT:    addi sp, sp, -16
1237; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1238; RV64IFD-NEXT:    call atan
1239; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1240; RV64IFD-NEXT:    addi sp, sp, 16
1241; RV64IFD-NEXT:    ret
1242;
1243; RV32I-LABEL: atan_f64:
1244; RV32I:       # %bb.0:
1245; RV32I-NEXT:    addi sp, sp, -16
1246; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1247; RV32I-NEXT:    call atan
1248; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1249; RV32I-NEXT:    addi sp, sp, 16
1250; RV32I-NEXT:    ret
1251;
1252; RV64I-LABEL: atan_f64:
1253; RV64I:       # %bb.0:
1254; RV64I-NEXT:    addi sp, sp, -16
1255; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1256; RV64I-NEXT:    call atan
1257; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1258; RV64I-NEXT:    addi sp, sp, 16
1259; RV64I-NEXT:    ret
1260  %1 = call double @llvm.atan.f64(double %a)
1261  ret double %1
1262}
1263
; Two-operand case: llvm.atan2.f64 lowers to a libcall to atan2 in all
; four configurations; both doubles are passed straight through in the
; argument registers, so only ra needs spilling.
1264define double @atan2_f64(double %a, double %b) nounwind {
1265; RV32IFD-LABEL: atan2_f64:
1266; RV32IFD:       # %bb.0:
1267; RV32IFD-NEXT:    addi sp, sp, -16
1268; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1269; RV32IFD-NEXT:    call atan2
1270; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1271; RV32IFD-NEXT:    addi sp, sp, 16
1272; RV32IFD-NEXT:    ret
1273;
1274; RV64IFD-LABEL: atan2_f64:
1275; RV64IFD:       # %bb.0:
1276; RV64IFD-NEXT:    addi sp, sp, -16
1277; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1278; RV64IFD-NEXT:    call atan2
1279; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1280; RV64IFD-NEXT:    addi sp, sp, 16
1281; RV64IFD-NEXT:    ret
1282;
1283; RV32I-LABEL: atan2_f64:
1284; RV32I:       # %bb.0:
1285; RV32I-NEXT:    addi sp, sp, -16
1286; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1287; RV32I-NEXT:    call atan2
1288; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1289; RV32I-NEXT:    addi sp, sp, 16
1290; RV32I-NEXT:    ret
1291;
1292; RV64I-LABEL: atan2_f64:
1293; RV64I:       # %bb.0:
1294; RV64I-NEXT:    addi sp, sp, -16
1295; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1296; RV64I-NEXT:    call atan2
1297; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1298; RV64I-NEXT:    addi sp, sp, 16
1299; RV64I-NEXT:    ret
1300  %1 = call double @llvm.atan2.f64(double %a, double %b)
1301  ret double %1
1302}
1303
; llvm.sinh.f64 lowers to a libcall to sinh in all four configurations.
1304define double @sinh_f64(double %a) nounwind {
1305; RV32IFD-LABEL: sinh_f64:
1306; RV32IFD:       # %bb.0:
1307; RV32IFD-NEXT:    addi sp, sp, -16
1308; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1309; RV32IFD-NEXT:    call sinh
1310; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1311; RV32IFD-NEXT:    addi sp, sp, 16
1312; RV32IFD-NEXT:    ret
1313;
1314; RV64IFD-LABEL: sinh_f64:
1315; RV64IFD:       # %bb.0:
1316; RV64IFD-NEXT:    addi sp, sp, -16
1317; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1318; RV64IFD-NEXT:    call sinh
1319; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1320; RV64IFD-NEXT:    addi sp, sp, 16
1321; RV64IFD-NEXT:    ret
1322;
1323; RV32I-LABEL: sinh_f64:
1324; RV32I:       # %bb.0:
1325; RV32I-NEXT:    addi sp, sp, -16
1326; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1327; RV32I-NEXT:    call sinh
1328; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1329; RV32I-NEXT:    addi sp, sp, 16
1330; RV32I-NEXT:    ret
1331;
1332; RV64I-LABEL: sinh_f64:
1333; RV64I:       # %bb.0:
1334; RV64I-NEXT:    addi sp, sp, -16
1335; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1336; RV64I-NEXT:    call sinh
1337; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1338; RV64I-NEXT:    addi sp, sp, 16
1339; RV64I-NEXT:    ret
1340  %1 = call double @llvm.sinh.f64(double %a)
1341  ret double %1
1342}
1343
; llvm.cosh.f64 lowers to a libcall to cosh in all four configurations.
1344define double @cosh_f64(double %a) nounwind {
1345; RV32IFD-LABEL: cosh_f64:
1346; RV32IFD:       # %bb.0:
1347; RV32IFD-NEXT:    addi sp, sp, -16
1348; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1349; RV32IFD-NEXT:    call cosh
1350; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1351; RV32IFD-NEXT:    addi sp, sp, 16
1352; RV32IFD-NEXT:    ret
1353;
1354; RV64IFD-LABEL: cosh_f64:
1355; RV64IFD:       # %bb.0:
1356; RV64IFD-NEXT:    addi sp, sp, -16
1357; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1358; RV64IFD-NEXT:    call cosh
1359; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1360; RV64IFD-NEXT:    addi sp, sp, 16
1361; RV64IFD-NEXT:    ret
1362;
1363; RV32I-LABEL: cosh_f64:
1364; RV32I:       # %bb.0:
1365; RV32I-NEXT:    addi sp, sp, -16
1366; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1367; RV32I-NEXT:    call cosh
1368; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1369; RV32I-NEXT:    addi sp, sp, 16
1370; RV32I-NEXT:    ret
1371;
1372; RV64I-LABEL: cosh_f64:
1373; RV64I:       # %bb.0:
1374; RV64I-NEXT:    addi sp, sp, -16
1375; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1376; RV64I-NEXT:    call cosh
1377; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1378; RV64I-NEXT:    addi sp, sp, 16
1379; RV64I-NEXT:    ret
1380  %1 = call double @llvm.cosh.f64(double %a)
1381  ret double %1
1382}
1383
; llvm.tanh.f64 lowers to a libcall to tanh in all four configurations.
1384define double @tanh_f64(double %a) nounwind {
1385; RV32IFD-LABEL: tanh_f64:
1386; RV32IFD:       # %bb.0:
1387; RV32IFD-NEXT:    addi sp, sp, -16
1388; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1389; RV32IFD-NEXT:    call tanh
1390; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1391; RV32IFD-NEXT:    addi sp, sp, 16
1392; RV32IFD-NEXT:    ret
1393;
1394; RV64IFD-LABEL: tanh_f64:
1395; RV64IFD:       # %bb.0:
1396; RV64IFD-NEXT:    addi sp, sp, -16
1397; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1398; RV64IFD-NEXT:    call tanh
1399; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1400; RV64IFD-NEXT:    addi sp, sp, 16
1401; RV64IFD-NEXT:    ret
1402;
1403; RV32I-LABEL: tanh_f64:
1404; RV32I:       # %bb.0:
1405; RV32I-NEXT:    addi sp, sp, -16
1406; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1407; RV32I-NEXT:    call tanh
1408; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1409; RV32I-NEXT:    addi sp, sp, 16
1410; RV32I-NEXT:    ret
1411;
1412; RV64I-LABEL: tanh_f64:
1413; RV64I:       # %bb.0:
1414; RV64I-NEXT:    addi sp, sp, -16
1415; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1416; RV64I-NEXT:    call tanh
1417; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1418; RV64I-NEXT:    addi sp, sp, 16
1419; RV64I-NEXT:    ret
1420  %1 = call double @llvm.tanh.f64(double %a)
1421  ret double %1
1422}
1423