; xref: /llvm-project/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll (revision 1bc9de247477b58a14547a31047d1c9a365e2d5d)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zfinx \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfinx \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
; RUN:   | FileCheck -check-prefix=RV64I %s

declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)

; Constrained sqrt selects the fsqrt.s instruction with F/Zfinx; soft-float
; configs fall back to the sqrtf libcall.
define float @sqrt_f32(float %a) nounwind strictfp {
; CHECKIF-LABEL: sqrt_f32:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fsqrt.s fa0, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: sqrt_f32:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fsqrt.s a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: sqrt_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrtf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sqrt_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrtf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)

; Constrained powi lowers to the __powisf2 libcall; on RV64 the i32 exponent
; must be sign-extended (sext.w) before the call.
define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV32IF-LABEL: powi_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __powisf2
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: powi_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    sext.w a0, a0
; RV64IF-NEXT:    call __powisf2
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: powi_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __powisf2
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: powi_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sext.w a1, a1
; RV64IZFINX-NEXT:    call __powisf2
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: powi_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __powisf2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: powi_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __powisf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.powi.f32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)

; Constrained sin lowers to the sinf libcall in all configs.
define float @sin_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: sin_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call sinf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: sin_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call sinf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: sin_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call sinf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: sin_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call sinf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sin_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sinf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sin_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sinf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)

; Constrained cos lowers to the cosf libcall in all configs.
define float @cos_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: cos_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call cosf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: cos_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call cosf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: cos_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call cosf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: cos_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call cosf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: cos_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call cosf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cos_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call cosf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.cos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; The sin+cos combination results in an FSINCOS SelectionDAG node; with
; strictfp it must stay as two separate libcalls, with the argument kept
; live across the first call in a callee-saved register.
define float @sincos_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: sincos_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fmv.s fs0, fa0
; RV32IF-NEXT:    call sinf
; RV32IF-NEXT:    fmv.s fs1, fa0
; RV32IF-NEXT:    fmv.s fa0, fs0
; RV32IF-NEXT:    call cosf
; RV32IF-NEXT:    fadd.s fa0, fs1, fa0
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs1, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: sincos_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IF-NEXT:    fsw fs1, 0(sp) # 4-byte Folded Spill
; RV64IF-NEXT:    fmv.s fs0, fa0
; RV64IF-NEXT:    call sinf
; RV64IF-NEXT:    fmv.s fs1, fa0
; RV64IF-NEXT:    fmv.s fa0, fs0
; RV64IF-NEXT:    call cosf
; RV64IF-NEXT:    fadd.s fa0, fs1, fa0
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
; RV64IF-NEXT:    flw fs1, 0(sp) # 4-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: sincos_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    mv s0, a0
; RV32IZFINX-NEXT:    call sinf
; RV32IZFINX-NEXT:    mv s1, a0
; RV32IZFINX-NEXT:    mv a0, s0
; RV32IZFINX-NEXT:    call cosf
; RV32IZFINX-NEXT:    fadd.s a0, s1, a0
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: sincos_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -32
; RV64IZFINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    mv s0, a0
; RV64IZFINX-NEXT:    call sinf
; RV64IZFINX-NEXT:    mv s1, a0
; RV64IZFINX-NEXT:    mv a0, s0
; RV64IZFINX-NEXT:    call cosf
; RV64IZFINX-NEXT:    fadd.s a0, s1, a0
; RV64IZFINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 32
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sincos_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    call sinf
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call cosf
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sincos_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    call sinf
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call cosf
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %2 = call float @llvm.experimental.constrained.cos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  %3 = fadd float %1, %2
  ret float %3
}

declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)

; Constrained tan lowers to the tanf libcall in all configs.
define float @tan_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: tan_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call tanf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: tan_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call tanf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: tan_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call tanf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: tan_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call tanf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: tan_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call tanf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: tan_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call tanf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.tan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained asin lowers to the asinf libcall in all configs.
define float @asin_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: asin_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call asinf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: asin_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call asinf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: asin_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call asinf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: asin_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call asinf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: asin_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call asinf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: asin_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call asinf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.asin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained acos lowers to the acosf libcall in all configs.
define float @acos_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: acos_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call acosf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: acos_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call acosf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: acos_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call acosf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: acos_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call acosf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: acos_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call acosf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: acos_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call acosf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.acos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained atan lowers to the atanf libcall in all configs.
define float @atan_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: atan_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call atanf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: atan_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call atanf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: atan_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call atanf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: atan_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call atanf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: atan_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call atanf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: atan_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call atanf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.atan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.atan2.f32(float, float, metadata, metadata)

; Constrained atan2 lowers to the atan2f libcall in all configs.
define float @atan2_f32(float %a, float %b) nounwind strictfp {
; RV32IF-LABEL: atan2_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call atan2f
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: atan2_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call atan2f
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: atan2_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call atan2f
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: atan2_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call atan2f
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: atan2_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call atan2f
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: atan2_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call atan2f
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.atan2.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained sinh lowers to the sinhf libcall in all configs.
define float @sinh_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: sinh_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call sinhf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: sinh_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call sinhf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: sinh_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call sinhf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: sinh_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call sinhf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: sinh_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sinhf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: sinh_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sinhf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sinh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained cosh lowers to the coshf libcall in all configs.
define float @cosh_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: cosh_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call coshf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: cosh_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call coshf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: cosh_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call coshf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: cosh_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call coshf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: cosh_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call coshf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: cosh_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call coshf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.cosh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

; Constrained tanh lowers to the tanhf libcall in all configs.
define float @tanh_f32(float %a) nounwind strictfp {
; RV32IF-LABEL: tanh_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call tanhf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: tanh_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call tanhf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: tanh_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call tanhf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: tanh_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call tanhf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: tanh_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call tanhf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: tanh_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call tanhf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.tanh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)

; Constrained pow lowers to the powf libcall in all configs.
define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV32IF-LABEL: pow_f32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call powf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: pow_f32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call powf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: pow_f32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call powf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: pow_f32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call powf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: pow_f32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call powf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: pow_f32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call powf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.pow.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
884
885declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
886
; Strictfp lowering of llvm.experimental.constrained.exp.f32: no native
; instruction exists, so all configurations fall back to a libcall to expf.
887define float @exp_f32(float %a) nounwind strictfp {
888; RV32IF-LABEL: exp_f32:
889; RV32IF:       # %bb.0:
890; RV32IF-NEXT:    addi sp, sp, -16
891; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
892; RV32IF-NEXT:    call expf
893; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
894; RV32IF-NEXT:    addi sp, sp, 16
895; RV32IF-NEXT:    ret
896;
897; RV64IF-LABEL: exp_f32:
898; RV64IF:       # %bb.0:
899; RV64IF-NEXT:    addi sp, sp, -16
900; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
901; RV64IF-NEXT:    call expf
902; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
903; RV64IF-NEXT:    addi sp, sp, 16
904; RV64IF-NEXT:    ret
905;
906; RV32IZFINX-LABEL: exp_f32:
907; RV32IZFINX:       # %bb.0:
908; RV32IZFINX-NEXT:    addi sp, sp, -16
909; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
910; RV32IZFINX-NEXT:    call expf
911; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
912; RV32IZFINX-NEXT:    addi sp, sp, 16
913; RV32IZFINX-NEXT:    ret
914;
915; RV64IZFINX-LABEL: exp_f32:
916; RV64IZFINX:       # %bb.0:
917; RV64IZFINX-NEXT:    addi sp, sp, -16
918; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
919; RV64IZFINX-NEXT:    call expf
920; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
921; RV64IZFINX-NEXT:    addi sp, sp, 16
922; RV64IZFINX-NEXT:    ret
923;
924; RV32I-LABEL: exp_f32:
925; RV32I:       # %bb.0:
926; RV32I-NEXT:    addi sp, sp, -16
927; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
928; RV32I-NEXT:    call expf
929; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
930; RV32I-NEXT:    addi sp, sp, 16
931; RV32I-NEXT:    ret
932;
933; RV64I-LABEL: exp_f32:
934; RV64I:       # %bb.0:
935; RV64I-NEXT:    addi sp, sp, -16
936; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
937; RV64I-NEXT:    call expf
938; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
939; RV64I-NEXT:    addi sp, sp, 16
940; RV64I-NEXT:    ret
941  %1 = call float @llvm.experimental.constrained.exp.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
942  ret float %1
943}
944
945declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
946
; Strictfp lowering of llvm.experimental.constrained.exp2.f32: all
; configurations emit a libcall to exp2f.
947define float @exp2_f32(float %a) nounwind strictfp {
948; RV32IF-LABEL: exp2_f32:
949; RV32IF:       # %bb.0:
950; RV32IF-NEXT:    addi sp, sp, -16
951; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
952; RV32IF-NEXT:    call exp2f
953; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
954; RV32IF-NEXT:    addi sp, sp, 16
955; RV32IF-NEXT:    ret
956;
957; RV64IF-LABEL: exp2_f32:
958; RV64IF:       # %bb.0:
959; RV64IF-NEXT:    addi sp, sp, -16
960; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
961; RV64IF-NEXT:    call exp2f
962; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
963; RV64IF-NEXT:    addi sp, sp, 16
964; RV64IF-NEXT:    ret
965;
966; RV32IZFINX-LABEL: exp2_f32:
967; RV32IZFINX:       # %bb.0:
968; RV32IZFINX-NEXT:    addi sp, sp, -16
969; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
970; RV32IZFINX-NEXT:    call exp2f
971; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
972; RV32IZFINX-NEXT:    addi sp, sp, 16
973; RV32IZFINX-NEXT:    ret
974;
975; RV64IZFINX-LABEL: exp2_f32:
976; RV64IZFINX:       # %bb.0:
977; RV64IZFINX-NEXT:    addi sp, sp, -16
978; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
979; RV64IZFINX-NEXT:    call exp2f
980; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
981; RV64IZFINX-NEXT:    addi sp, sp, 16
982; RV64IZFINX-NEXT:    ret
983;
984; RV32I-LABEL: exp2_f32:
985; RV32I:       # %bb.0:
986; RV32I-NEXT:    addi sp, sp, -16
987; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
988; RV32I-NEXT:    call exp2f
989; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
990; RV32I-NEXT:    addi sp, sp, 16
991; RV32I-NEXT:    ret
992;
993; RV64I-LABEL: exp2_f32:
994; RV64I:       # %bb.0:
995; RV64I-NEXT:    addi sp, sp, -16
996; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
997; RV64I-NEXT:    call exp2f
998; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
999; RV64I-NEXT:    addi sp, sp, 16
1000; RV64I-NEXT:    ret
1001  %1 = call float @llvm.experimental.constrained.exp2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1002  ret float %1
1003}
1004
1005declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
1006
; Strictfp lowering of llvm.experimental.constrained.log.f32: all
; configurations emit a libcall to logf.
1007define float @log_f32(float %a) nounwind strictfp {
1008; RV32IF-LABEL: log_f32:
1009; RV32IF:       # %bb.0:
1010; RV32IF-NEXT:    addi sp, sp, -16
1011; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1012; RV32IF-NEXT:    call logf
1013; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1014; RV32IF-NEXT:    addi sp, sp, 16
1015; RV32IF-NEXT:    ret
1016;
1017; RV64IF-LABEL: log_f32:
1018; RV64IF:       # %bb.0:
1019; RV64IF-NEXT:    addi sp, sp, -16
1020; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1021; RV64IF-NEXT:    call logf
1022; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1023; RV64IF-NEXT:    addi sp, sp, 16
1024; RV64IF-NEXT:    ret
1025;
1026; RV32IZFINX-LABEL: log_f32:
1027; RV32IZFINX:       # %bb.0:
1028; RV32IZFINX-NEXT:    addi sp, sp, -16
1029; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1030; RV32IZFINX-NEXT:    call logf
1031; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1032; RV32IZFINX-NEXT:    addi sp, sp, 16
1033; RV32IZFINX-NEXT:    ret
1034;
1035; RV64IZFINX-LABEL: log_f32:
1036; RV64IZFINX:       # %bb.0:
1037; RV64IZFINX-NEXT:    addi sp, sp, -16
1038; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1039; RV64IZFINX-NEXT:    call logf
1040; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1041; RV64IZFINX-NEXT:    addi sp, sp, 16
1042; RV64IZFINX-NEXT:    ret
1043;
1044; RV32I-LABEL: log_f32:
1045; RV32I:       # %bb.0:
1046; RV32I-NEXT:    addi sp, sp, -16
1047; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1048; RV32I-NEXT:    call logf
1049; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1050; RV32I-NEXT:    addi sp, sp, 16
1051; RV32I-NEXT:    ret
1052;
1053; RV64I-LABEL: log_f32:
1054; RV64I:       # %bb.0:
1055; RV64I-NEXT:    addi sp, sp, -16
1056; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1057; RV64I-NEXT:    call logf
1058; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1059; RV64I-NEXT:    addi sp, sp, 16
1060; RV64I-NEXT:    ret
1061  %1 = call float @llvm.experimental.constrained.log.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1062  ret float %1
1063}
1064
1065declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
1066
; Strictfp lowering of llvm.experimental.constrained.log10.f32: all
; configurations emit a libcall to log10f.
1067define float @log10_f32(float %a) nounwind strictfp {
1068; RV32IF-LABEL: log10_f32:
1069; RV32IF:       # %bb.0:
1070; RV32IF-NEXT:    addi sp, sp, -16
1071; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1072; RV32IF-NEXT:    call log10f
1073; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1074; RV32IF-NEXT:    addi sp, sp, 16
1075; RV32IF-NEXT:    ret
1076;
1077; RV64IF-LABEL: log10_f32:
1078; RV64IF:       # %bb.0:
1079; RV64IF-NEXT:    addi sp, sp, -16
1080; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1081; RV64IF-NEXT:    call log10f
1082; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1083; RV64IF-NEXT:    addi sp, sp, 16
1084; RV64IF-NEXT:    ret
1085;
1086; RV32IZFINX-LABEL: log10_f32:
1087; RV32IZFINX:       # %bb.0:
1088; RV32IZFINX-NEXT:    addi sp, sp, -16
1089; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1090; RV32IZFINX-NEXT:    call log10f
1091; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1092; RV32IZFINX-NEXT:    addi sp, sp, 16
1093; RV32IZFINX-NEXT:    ret
1094;
1095; RV64IZFINX-LABEL: log10_f32:
1096; RV64IZFINX:       # %bb.0:
1097; RV64IZFINX-NEXT:    addi sp, sp, -16
1098; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1099; RV64IZFINX-NEXT:    call log10f
1100; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1101; RV64IZFINX-NEXT:    addi sp, sp, 16
1102; RV64IZFINX-NEXT:    ret
1103;
1104; RV32I-LABEL: log10_f32:
1105; RV32I:       # %bb.0:
1106; RV32I-NEXT:    addi sp, sp, -16
1107; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1108; RV32I-NEXT:    call log10f
1109; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1110; RV32I-NEXT:    addi sp, sp, 16
1111; RV32I-NEXT:    ret
1112;
1113; RV64I-LABEL: log10_f32:
1114; RV64I:       # %bb.0:
1115; RV64I-NEXT:    addi sp, sp, -16
1116; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1117; RV64I-NEXT:    call log10f
1118; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1119; RV64I-NEXT:    addi sp, sp, 16
1120; RV64I-NEXT:    ret
1121  %1 = call float @llvm.experimental.constrained.log10.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1122  ret float %1
1123}
1124
1125declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
1126
; Strictfp lowering of llvm.experimental.constrained.log2.f32: all
; configurations emit a libcall to log2f.
1127define float @log2_f32(float %a) nounwind strictfp {
1128; RV32IF-LABEL: log2_f32:
1129; RV32IF:       # %bb.0:
1130; RV32IF-NEXT:    addi sp, sp, -16
1131; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1132; RV32IF-NEXT:    call log2f
1133; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1134; RV32IF-NEXT:    addi sp, sp, 16
1135; RV32IF-NEXT:    ret
1136;
1137; RV64IF-LABEL: log2_f32:
1138; RV64IF:       # %bb.0:
1139; RV64IF-NEXT:    addi sp, sp, -16
1140; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1141; RV64IF-NEXT:    call log2f
1142; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1143; RV64IF-NEXT:    addi sp, sp, 16
1144; RV64IF-NEXT:    ret
1145;
1146; RV32IZFINX-LABEL: log2_f32:
1147; RV32IZFINX:       # %bb.0:
1148; RV32IZFINX-NEXT:    addi sp, sp, -16
1149; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1150; RV32IZFINX-NEXT:    call log2f
1151; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1152; RV32IZFINX-NEXT:    addi sp, sp, 16
1153; RV32IZFINX-NEXT:    ret
1154;
1155; RV64IZFINX-LABEL: log2_f32:
1156; RV64IZFINX:       # %bb.0:
1157; RV64IZFINX-NEXT:    addi sp, sp, -16
1158; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1159; RV64IZFINX-NEXT:    call log2f
1160; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1161; RV64IZFINX-NEXT:    addi sp, sp, 16
1162; RV64IZFINX-NEXT:    ret
1163;
1164; RV32I-LABEL: log2_f32:
1165; RV32I:       # %bb.0:
1166; RV32I-NEXT:    addi sp, sp, -16
1167; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1168; RV32I-NEXT:    call log2f
1169; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1170; RV32I-NEXT:    addi sp, sp, 16
1171; RV32I-NEXT:    ret
1172;
1173; RV64I-LABEL: log2_f32:
1174; RV64I:       # %bb.0:
1175; RV64I-NEXT:    addi sp, sp, -16
1176; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1177; RV64I-NEXT:    call log2f
1178; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1179; RV64I-NEXT:    addi sp, sp, 16
1180; RV64I-NEXT:    ret
1181  %1 = call float @llvm.experimental.constrained.log2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1182  ret float %1
1183}
1184
1185declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
1186
; Strictfp lowering of llvm.experimental.constrained.fma.f32: hardware FP
; (F via fa-registers, Zfinx via x-registers) selects a single fmadd.s;
; soft-float RV32I/RV64I emits a libcall to fmaf.
1187define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
1188; CHECKIF-LABEL: fma_f32:
1189; CHECKIF:       # %bb.0:
1190; CHECKIF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
1191; CHECKIF-NEXT:    ret
1192;
1193; CHECKIZFINX-LABEL: fma_f32:
1194; CHECKIZFINX:       # %bb.0:
1195; CHECKIZFINX-NEXT:    fmadd.s a0, a0, a1, a2
1196; CHECKIZFINX-NEXT:    ret
1197;
1198; RV32I-LABEL: fma_f32:
1199; RV32I:       # %bb.0:
1200; RV32I-NEXT:    addi sp, sp, -16
1201; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1202; RV32I-NEXT:    call fmaf
1203; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1204; RV32I-NEXT:    addi sp, sp, 16
1205; RV32I-NEXT:    ret
1206;
1207; RV64I-LABEL: fma_f32:
1208; RV64I:       # %bb.0:
1209; RV64I-NEXT:    addi sp, sp, -16
1210; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1211; RV64I-NEXT:    call fmaf
1212; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1213; RV64I-NEXT:    addi sp, sp, 16
1214; RV64I-NEXT:    ret
1215  %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1216  ret float %1
1217}
1218
1219declare float @llvm.experimental.constrained.fmuladd.f32(float, float, float, metadata, metadata)
1220
; Strictfp lowering of llvm.experimental.constrained.fmuladd.f32: with
; hardware FP it fuses into fmadd.s; without FP it expands to separate
; __mulsf3 + __addsf3 libcalls, keeping %c alive in s0 across the first
; call (fmuladd allows either fused or unfused evaluation).
1221define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
1222; CHECKIF-LABEL: fmuladd_f32:
1223; CHECKIF:       # %bb.0:
1224; CHECKIF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
1225; CHECKIF-NEXT:    ret
1226;
1227; CHECKIZFINX-LABEL: fmuladd_f32:
1228; CHECKIZFINX:       # %bb.0:
1229; CHECKIZFINX-NEXT:    fmadd.s a0, a0, a1, a2
1230; CHECKIZFINX-NEXT:    ret
1231;
1232; RV32I-LABEL: fmuladd_f32:
1233; RV32I:       # %bb.0:
1234; RV32I-NEXT:    addi sp, sp, -16
1235; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1236; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
1237; RV32I-NEXT:    mv s0, a2
1238; RV32I-NEXT:    call __mulsf3
1239; RV32I-NEXT:    mv a1, s0
1240; RV32I-NEXT:    call __addsf3
1241; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1242; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
1243; RV32I-NEXT:    addi sp, sp, 16
1244; RV32I-NEXT:    ret
1245;
1246; RV64I-LABEL: fmuladd_f32:
1247; RV64I:       # %bb.0:
1248; RV64I-NEXT:    addi sp, sp, -16
1249; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1250; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
1251; RV64I-NEXT:    mv s0, a2
1252; RV64I-NEXT:    call __mulsf3
1253; RV64I-NEXT:    mv a1, s0
1254; RV64I-NEXT:    call __addsf3
1255; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1256; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
1257; RV64I-NEXT:    addi sp, sp, 16
1258; RV64I-NEXT:    ret
1259  %1 = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1260  ret float %1
1261}
1262
1263declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
1264
; Strictfp lowering of llvm.experimental.constrained.minnum.f32: note that
; even the hard-float configurations emit a libcall to fminf rather than
; selecting fmin.s — the strict node is not matched to the instruction here.
1265define float @minnum_f32(float %a, float %b) nounwind strictfp {
1266; RV32IF-LABEL: minnum_f32:
1267; RV32IF:       # %bb.0:
1268; RV32IF-NEXT:    addi sp, sp, -16
1269; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1270; RV32IF-NEXT:    call fminf
1271; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1272; RV32IF-NEXT:    addi sp, sp, 16
1273; RV32IF-NEXT:    ret
1274;
1275; RV64IF-LABEL: minnum_f32:
1276; RV64IF:       # %bb.0:
1277; RV64IF-NEXT:    addi sp, sp, -16
1278; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1279; RV64IF-NEXT:    call fminf
1280; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1281; RV64IF-NEXT:    addi sp, sp, 16
1282; RV64IF-NEXT:    ret
1283;
1284; RV32IZFINX-LABEL: minnum_f32:
1285; RV32IZFINX:       # %bb.0:
1286; RV32IZFINX-NEXT:    addi sp, sp, -16
1287; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1288; RV32IZFINX-NEXT:    call fminf
1289; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1290; RV32IZFINX-NEXT:    addi sp, sp, 16
1291; RV32IZFINX-NEXT:    ret
1292;
1293; RV64IZFINX-LABEL: minnum_f32:
1294; RV64IZFINX:       # %bb.0:
1295; RV64IZFINX-NEXT:    addi sp, sp, -16
1296; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1297; RV64IZFINX-NEXT:    call fminf
1298; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1299; RV64IZFINX-NEXT:    addi sp, sp, 16
1300; RV64IZFINX-NEXT:    ret
1301;
1302; RV32I-LABEL: minnum_f32:
1303; RV32I:       # %bb.0:
1304; RV32I-NEXT:    addi sp, sp, -16
1305; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1306; RV32I-NEXT:    call fminf
1307; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1308; RV32I-NEXT:    addi sp, sp, 16
1309; RV32I-NEXT:    ret
1310;
1311; RV64I-LABEL: minnum_f32:
1312; RV64I:       # %bb.0:
1313; RV64I-NEXT:    addi sp, sp, -16
1314; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1315; RV64I-NEXT:    call fminf
1316; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1317; RV64I-NEXT:    addi sp, sp, 16
1318; RV64I-NEXT:    ret
1319  %1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
1320  ret float %1
1321}
1322
1323declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
1324
; Strictfp lowering of llvm.experimental.constrained.maxnum.f32: mirrors
; minnum_f32 above — every configuration, hard-float included, emits a
; libcall to fmaxf instead of selecting fmax.s.
1325define float @maxnum_f32(float %a, float %b) nounwind strictfp {
1326; RV32IF-LABEL: maxnum_f32:
1327; RV32IF:       # %bb.0:
1328; RV32IF-NEXT:    addi sp, sp, -16
1329; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1330; RV32IF-NEXT:    call fmaxf
1331; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1332; RV32IF-NEXT:    addi sp, sp, 16
1333; RV32IF-NEXT:    ret
1334;
1335; RV64IF-LABEL: maxnum_f32:
1336; RV64IF:       # %bb.0:
1337; RV64IF-NEXT:    addi sp, sp, -16
1338; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1339; RV64IF-NEXT:    call fmaxf
1340; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1341; RV64IF-NEXT:    addi sp, sp, 16
1342; RV64IF-NEXT:    ret
1343;
1344; RV32IZFINX-LABEL: maxnum_f32:
1345; RV32IZFINX:       # %bb.0:
1346; RV32IZFINX-NEXT:    addi sp, sp, -16
1347; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1348; RV32IZFINX-NEXT:    call fmaxf
1349; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1350; RV32IZFINX-NEXT:    addi sp, sp, 16
1351; RV32IZFINX-NEXT:    ret
1352;
1353; RV64IZFINX-LABEL: maxnum_f32:
1354; RV64IZFINX:       # %bb.0:
1355; RV64IZFINX-NEXT:    addi sp, sp, -16
1356; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1357; RV64IZFINX-NEXT:    call fmaxf
1358; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1359; RV64IZFINX-NEXT:    addi sp, sp, 16
1360; RV64IZFINX-NEXT:    ret
1361;
1362; RV32I-LABEL: maxnum_f32:
1363; RV32I:       # %bb.0:
1364; RV32I-NEXT:    addi sp, sp, -16
1365; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1366; RV32I-NEXT:    call fmaxf
1367; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1368; RV32I-NEXT:    addi sp, sp, 16
1369; RV32I-NEXT:    ret
1370;
1371; RV64I-LABEL: maxnum_f32:
1372; RV64I:       # %bb.0:
1373; RV64I-NEXT:    addi sp, sp, -16
1374; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1375; RV64I-NEXT:    call fmaxf
1376; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1377; RV64I-NEXT:    addi sp, sp, 16
1378; RV64I-NEXT:    ret
1379  %1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
1380  ret float %1
1381}
1382
1383; TODO: FMINNAN and FMAXNAN aren't handled in
1384; SelectionDAGLegalize::ExpandNode.
1385
1386; declare float @llvm.experimental.constrained.minimum.f32(float, float, metadata)
1387
1388; define float @fminimum_f32(float %a, float %b) nounwind strictfp {
1389;   %1 = call float @llvm.experimental.constrained.minimum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
1390;   ret float %1
1391; }
1392
1393; declare float @llvm.experimental.constrained.maximum.f32(float, float, metadata)
1394
1395; define float @fmaximum_f32(float %a, float %b) nounwind strictfp {
1396;   %1 = call float @llvm.experimental.constrained.maximum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
1397;   ret float %1
1398; }
1399
1400declare float @llvm.experimental.constrained.floor.f32(float, metadata)
1401
; Strictfp lowering of llvm.experimental.constrained.floor.f32: all
; configurations emit a libcall to floorf (no exception-safe native
; rounding sequence is selected for the strict node).
1402define float @floor_f32(float %a) nounwind strictfp {
1403; RV32IF-LABEL: floor_f32:
1404; RV32IF:       # %bb.0:
1405; RV32IF-NEXT:    addi sp, sp, -16
1406; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1407; RV32IF-NEXT:    call floorf
1408; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1409; RV32IF-NEXT:    addi sp, sp, 16
1410; RV32IF-NEXT:    ret
1411;
1412; RV64IF-LABEL: floor_f32:
1413; RV64IF:       # %bb.0:
1414; RV64IF-NEXT:    addi sp, sp, -16
1415; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1416; RV64IF-NEXT:    call floorf
1417; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1418; RV64IF-NEXT:    addi sp, sp, 16
1419; RV64IF-NEXT:    ret
1420;
1421; RV32IZFINX-LABEL: floor_f32:
1422; RV32IZFINX:       # %bb.0:
1423; RV32IZFINX-NEXT:    addi sp, sp, -16
1424; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1425; RV32IZFINX-NEXT:    call floorf
1426; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1427; RV32IZFINX-NEXT:    addi sp, sp, 16
1428; RV32IZFINX-NEXT:    ret
1429;
1430; RV64IZFINX-LABEL: floor_f32:
1431; RV64IZFINX:       # %bb.0:
1432; RV64IZFINX-NEXT:    addi sp, sp, -16
1433; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1434; RV64IZFINX-NEXT:    call floorf
1435; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1436; RV64IZFINX-NEXT:    addi sp, sp, 16
1437; RV64IZFINX-NEXT:    ret
1438;
1439; RV32I-LABEL: floor_f32:
1440; RV32I:       # %bb.0:
1441; RV32I-NEXT:    addi sp, sp, -16
1442; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1443; RV32I-NEXT:    call floorf
1444; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1445; RV32I-NEXT:    addi sp, sp, 16
1446; RV32I-NEXT:    ret
1447;
1448; RV64I-LABEL: floor_f32:
1449; RV64I:       # %bb.0:
1450; RV64I-NEXT:    addi sp, sp, -16
1451; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1452; RV64I-NEXT:    call floorf
1453; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1454; RV64I-NEXT:    addi sp, sp, 16
1455; RV64I-NEXT:    ret
1456  %1 = call float @llvm.experimental.constrained.floor.f32(float %a, metadata !"fpexcept.strict") strictfp
1457  ret float %1
1458}
1459
1460declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
1461
; Strictfp lowering of llvm.experimental.constrained.ceil.f32: all
; configurations emit a libcall to ceilf.
1462define float @ceil_f32(float %a) nounwind strictfp {
1463; RV32IF-LABEL: ceil_f32:
1464; RV32IF:       # %bb.0:
1465; RV32IF-NEXT:    addi sp, sp, -16
1466; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1467; RV32IF-NEXT:    call ceilf
1468; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1469; RV32IF-NEXT:    addi sp, sp, 16
1470; RV32IF-NEXT:    ret
1471;
1472; RV64IF-LABEL: ceil_f32:
1473; RV64IF:       # %bb.0:
1474; RV64IF-NEXT:    addi sp, sp, -16
1475; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1476; RV64IF-NEXT:    call ceilf
1477; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1478; RV64IF-NEXT:    addi sp, sp, 16
1479; RV64IF-NEXT:    ret
1480;
1481; RV32IZFINX-LABEL: ceil_f32:
1482; RV32IZFINX:       # %bb.0:
1483; RV32IZFINX-NEXT:    addi sp, sp, -16
1484; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1485; RV32IZFINX-NEXT:    call ceilf
1486; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1487; RV32IZFINX-NEXT:    addi sp, sp, 16
1488; RV32IZFINX-NEXT:    ret
1489;
1490; RV64IZFINX-LABEL: ceil_f32:
1491; RV64IZFINX:       # %bb.0:
1492; RV64IZFINX-NEXT:    addi sp, sp, -16
1493; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1494; RV64IZFINX-NEXT:    call ceilf
1495; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1496; RV64IZFINX-NEXT:    addi sp, sp, 16
1497; RV64IZFINX-NEXT:    ret
1498;
1499; RV32I-LABEL: ceil_f32:
1500; RV32I:       # %bb.0:
1501; RV32I-NEXT:    addi sp, sp, -16
1502; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1503; RV32I-NEXT:    call ceilf
1504; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1505; RV32I-NEXT:    addi sp, sp, 16
1506; RV32I-NEXT:    ret
1507;
1508; RV64I-LABEL: ceil_f32:
1509; RV64I:       # %bb.0:
1510; RV64I-NEXT:    addi sp, sp, -16
1511; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1512; RV64I-NEXT:    call ceilf
1513; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1514; RV64I-NEXT:    addi sp, sp, 16
1515; RV64I-NEXT:    ret
1516  %1 = call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.strict") strictfp
1517  ret float %1
1518}
1519
1520declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
1521
; Strictfp lowering of llvm.experimental.constrained.trunc.f32: all
; configurations emit a libcall to truncf.
1522define float @trunc_f32(float %a) nounwind strictfp {
1523; RV32IF-LABEL: trunc_f32:
1524; RV32IF:       # %bb.0:
1525; RV32IF-NEXT:    addi sp, sp, -16
1526; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1527; RV32IF-NEXT:    call truncf
1528; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1529; RV32IF-NEXT:    addi sp, sp, 16
1530; RV32IF-NEXT:    ret
1531;
1532; RV64IF-LABEL: trunc_f32:
1533; RV64IF:       # %bb.0:
1534; RV64IF-NEXT:    addi sp, sp, -16
1535; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1536; RV64IF-NEXT:    call truncf
1537; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1538; RV64IF-NEXT:    addi sp, sp, 16
1539; RV64IF-NEXT:    ret
1540;
1541; RV32IZFINX-LABEL: trunc_f32:
1542; RV32IZFINX:       # %bb.0:
1543; RV32IZFINX-NEXT:    addi sp, sp, -16
1544; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1545; RV32IZFINX-NEXT:    call truncf
1546; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1547; RV32IZFINX-NEXT:    addi sp, sp, 16
1548; RV32IZFINX-NEXT:    ret
1549;
1550; RV64IZFINX-LABEL: trunc_f32:
1551; RV64IZFINX:       # %bb.0:
1552; RV64IZFINX-NEXT:    addi sp, sp, -16
1553; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1554; RV64IZFINX-NEXT:    call truncf
1555; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1556; RV64IZFINX-NEXT:    addi sp, sp, 16
1557; RV64IZFINX-NEXT:    ret
1558;
1559; RV32I-LABEL: trunc_f32:
1560; RV32I:       # %bb.0:
1561; RV32I-NEXT:    addi sp, sp, -16
1562; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1563; RV32I-NEXT:    call truncf
1564; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1565; RV32I-NEXT:    addi sp, sp, 16
1566; RV32I-NEXT:    ret
1567;
1568; RV64I-LABEL: trunc_f32:
1569; RV64I:       # %bb.0:
1570; RV64I-NEXT:    addi sp, sp, -16
1571; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1572; RV64I-NEXT:    call truncf
1573; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1574; RV64I-NEXT:    addi sp, sp, 16
1575; RV64I-NEXT:    ret
1576  %1 = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict") strictfp
1577  ret float %1
1578}
1579
1580declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
1581
; Strictfp lowering of llvm.experimental.constrained.rint.f32: all
; configurations emit a libcall to rintf.
1582define float @rint_f32(float %a) nounwind strictfp {
1583; RV32IF-LABEL: rint_f32:
1584; RV32IF:       # %bb.0:
1585; RV32IF-NEXT:    addi sp, sp, -16
1586; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1587; RV32IF-NEXT:    call rintf
1588; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1589; RV32IF-NEXT:    addi sp, sp, 16
1590; RV32IF-NEXT:    ret
1591;
1592; RV64IF-LABEL: rint_f32:
1593; RV64IF:       # %bb.0:
1594; RV64IF-NEXT:    addi sp, sp, -16
1595; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1596; RV64IF-NEXT:    call rintf
1597; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1598; RV64IF-NEXT:    addi sp, sp, 16
1599; RV64IF-NEXT:    ret
1600;
1601; RV32IZFINX-LABEL: rint_f32:
1602; RV32IZFINX:       # %bb.0:
1603; RV32IZFINX-NEXT:    addi sp, sp, -16
1604; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1605; RV32IZFINX-NEXT:    call rintf
1606; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1607; RV32IZFINX-NEXT:    addi sp, sp, 16
1608; RV32IZFINX-NEXT:    ret
1609;
1610; RV64IZFINX-LABEL: rint_f32:
1611; RV64IZFINX:       # %bb.0:
1612; RV64IZFINX-NEXT:    addi sp, sp, -16
1613; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1614; RV64IZFINX-NEXT:    call rintf
1615; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1616; RV64IZFINX-NEXT:    addi sp, sp, 16
1617; RV64IZFINX-NEXT:    ret
1618;
1619; RV32I-LABEL: rint_f32:
1620; RV32I:       # %bb.0:
1621; RV32I-NEXT:    addi sp, sp, -16
1622; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1623; RV32I-NEXT:    call rintf
1624; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1625; RV32I-NEXT:    addi sp, sp, 16
1626; RV32I-NEXT:    ret
1627;
1628; RV64I-LABEL: rint_f32:
1629; RV64I:       # %bb.0:
1630; RV64I-NEXT:    addi sp, sp, -16
1631; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1632; RV64I-NEXT:    call rintf
1633; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1634; RV64I-NEXT:    addi sp, sp, 16
1635; RV64I-NEXT:    ret
1636  %1 = call float @llvm.experimental.constrained.rint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1637  ret float %1
1638}
1639
1640declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
1641
; Strictfp lowering of llvm.experimental.constrained.nearbyint.f32: all
; configurations emit a libcall to nearbyintf.
1642define float @nearbyint_f32(float %a) nounwind strictfp {
1643; RV32IF-LABEL: nearbyint_f32:
1644; RV32IF:       # %bb.0:
1645; RV32IF-NEXT:    addi sp, sp, -16
1646; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1647; RV32IF-NEXT:    call nearbyintf
1648; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1649; RV32IF-NEXT:    addi sp, sp, 16
1650; RV32IF-NEXT:    ret
1651;
1652; RV64IF-LABEL: nearbyint_f32:
1653; RV64IF:       # %bb.0:
1654; RV64IF-NEXT:    addi sp, sp, -16
1655; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1656; RV64IF-NEXT:    call nearbyintf
1657; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1658; RV64IF-NEXT:    addi sp, sp, 16
1659; RV64IF-NEXT:    ret
1660;
1661; RV32IZFINX-LABEL: nearbyint_f32:
1662; RV32IZFINX:       # %bb.0:
1663; RV32IZFINX-NEXT:    addi sp, sp, -16
1664; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1665; RV32IZFINX-NEXT:    call nearbyintf
1666; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1667; RV32IZFINX-NEXT:    addi sp, sp, 16
1668; RV32IZFINX-NEXT:    ret
1669;
1670; RV64IZFINX-LABEL: nearbyint_f32:
1671; RV64IZFINX:       # %bb.0:
1672; RV64IZFINX-NEXT:    addi sp, sp, -16
1673; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1674; RV64IZFINX-NEXT:    call nearbyintf
1675; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1676; RV64IZFINX-NEXT:    addi sp, sp, 16
1677; RV64IZFINX-NEXT:    ret
1678;
1679; RV32I-LABEL: nearbyint_f32:
1680; RV32I:       # %bb.0:
1681; RV32I-NEXT:    addi sp, sp, -16
1682; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1683; RV32I-NEXT:    call nearbyintf
1684; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1685; RV32I-NEXT:    addi sp, sp, 16
1686; RV32I-NEXT:    ret
1687;
1688; RV64I-LABEL: nearbyint_f32:
1689; RV64I:       # %bb.0:
1690; RV64I-NEXT:    addi sp, sp, -16
1691; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1692; RV64I-NEXT:    call nearbyintf
1693; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1694; RV64I-NEXT:    addi sp, sp, 16
1695; RV64I-NEXT:    ret
1696  %1 = call float @llvm.experimental.constrained.nearbyint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1697  ret float %1
1698}
1699
1700declare float @llvm.experimental.constrained.round.f32(float, metadata)
1701
1702define float @round_f32(float %a) nounwind strictfp {
1703; RV32IF-LABEL: round_f32:
1704; RV32IF:       # %bb.0:
1705; RV32IF-NEXT:    addi sp, sp, -16
1706; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1707; RV32IF-NEXT:    call roundf
1708; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1709; RV32IF-NEXT:    addi sp, sp, 16
1710; RV32IF-NEXT:    ret
1711;
1712; RV64IF-LABEL: round_f32:
1713; RV64IF:       # %bb.0:
1714; RV64IF-NEXT:    addi sp, sp, -16
1715; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1716; RV64IF-NEXT:    call roundf
1717; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1718; RV64IF-NEXT:    addi sp, sp, 16
1719; RV64IF-NEXT:    ret
1720;
1721; RV32IZFINX-LABEL: round_f32:
1722; RV32IZFINX:       # %bb.0:
1723; RV32IZFINX-NEXT:    addi sp, sp, -16
1724; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1725; RV32IZFINX-NEXT:    call roundf
1726; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1727; RV32IZFINX-NEXT:    addi sp, sp, 16
1728; RV32IZFINX-NEXT:    ret
1729;
1730; RV64IZFINX-LABEL: round_f32:
1731; RV64IZFINX:       # %bb.0:
1732; RV64IZFINX-NEXT:    addi sp, sp, -16
1733; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1734; RV64IZFINX-NEXT:    call roundf
1735; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1736; RV64IZFINX-NEXT:    addi sp, sp, 16
1737; RV64IZFINX-NEXT:    ret
1738;
1739; RV32I-LABEL: round_f32:
1740; RV32I:       # %bb.0:
1741; RV32I-NEXT:    addi sp, sp, -16
1742; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1743; RV32I-NEXT:    call roundf
1744; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1745; RV32I-NEXT:    addi sp, sp, 16
1746; RV32I-NEXT:    ret
1747;
1748; RV64I-LABEL: round_f32:
1749; RV64I:       # %bb.0:
1750; RV64I-NEXT:    addi sp, sp, -16
1751; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1752; RV64I-NEXT:    call roundf
1753; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1754; RV64I-NEXT:    addi sp, sp, 16
1755; RV64I-NEXT:    ret
; NOTE(review): every configuration, hard- and soft-float alike, lowers the
; strict round to a roundf libcall, as the checks above show.
1756  %1 = call float @llvm.experimental.constrained.round.f32(float %a, metadata !"fpexcept.strict") strictfp
1757  ret float %1
1758}
1759
1760declare float @llvm.experimental.constrained.roundeven.f32(float, metadata)
1761
1762define float @roundeven_f32(float %a) nounwind strictfp {
1763; RV32IF-LABEL: roundeven_f32:
1764; RV32IF:       # %bb.0:
1765; RV32IF-NEXT:    addi sp, sp, -16
1766; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1767; RV32IF-NEXT:    call roundevenf
1768; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1769; RV32IF-NEXT:    addi sp, sp, 16
1770; RV32IF-NEXT:    ret
1771;
1772; RV64IF-LABEL: roundeven_f32:
1773; RV64IF:       # %bb.0:
1774; RV64IF-NEXT:    addi sp, sp, -16
1775; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1776; RV64IF-NEXT:    call roundevenf
1777; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1778; RV64IF-NEXT:    addi sp, sp, 16
1779; RV64IF-NEXT:    ret
1780;
1781; RV32IZFINX-LABEL: roundeven_f32:
1782; RV32IZFINX:       # %bb.0:
1783; RV32IZFINX-NEXT:    addi sp, sp, -16
1784; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1785; RV32IZFINX-NEXT:    call roundevenf
1786; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1787; RV32IZFINX-NEXT:    addi sp, sp, 16
1788; RV32IZFINX-NEXT:    ret
1789;
1790; RV64IZFINX-LABEL: roundeven_f32:
1791; RV64IZFINX:       # %bb.0:
1792; RV64IZFINX-NEXT:    addi sp, sp, -16
1793; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1794; RV64IZFINX-NEXT:    call roundevenf
1795; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1796; RV64IZFINX-NEXT:    addi sp, sp, 16
1797; RV64IZFINX-NEXT:    ret
1798;
1799; RV32I-LABEL: roundeven_f32:
1800; RV32I:       # %bb.0:
1801; RV32I-NEXT:    addi sp, sp, -16
1802; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1803; RV32I-NEXT:    call roundevenf
1804; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1805; RV32I-NEXT:    addi sp, sp, 16
1806; RV32I-NEXT:    ret
1807;
1808; RV64I-LABEL: roundeven_f32:
1809; RV64I:       # %bb.0:
1810; RV64I-NEXT:    addi sp, sp, -16
1811; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1812; RV64I-NEXT:    call roundevenf
1813; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1814; RV64I-NEXT:    addi sp, sp, 16
1815; RV64I-NEXT:    ret
; NOTE(review): every configuration lowers the strict roundeven to a
; roundevenf libcall, as the checks above show.
1816  %1 = call float @llvm.experimental.constrained.roundeven.f32(float %a, metadata !"fpexcept.strict") strictfp
1817  ret float %1
1818}
1819
1820declare iXLen @llvm.experimental.constrained.lrint.iXLen.f32(float, metadata, metadata)
1821
1822define iXLen @lrint_f32(float %a) nounwind strictfp {
1823; RV32IF-LABEL: lrint_f32:
1824; RV32IF:       # %bb.0:
1825; RV32IF-NEXT:    fcvt.w.s a0, fa0
1826; RV32IF-NEXT:    ret
1827;
1828; RV64IF-LABEL: lrint_f32:
1829; RV64IF:       # %bb.0:
1830; RV64IF-NEXT:    fcvt.l.s a0, fa0
1831; RV64IF-NEXT:    ret
1832;
1833; RV32IZFINX-LABEL: lrint_f32:
1834; RV32IZFINX:       # %bb.0:
1835; RV32IZFINX-NEXT:    fcvt.w.s a0, a0
1836; RV32IZFINX-NEXT:    ret
1837;
1838; RV64IZFINX-LABEL: lrint_f32:
1839; RV64IZFINX:       # %bb.0:
1840; RV64IZFINX-NEXT:    fcvt.l.s a0, a0
1841; RV64IZFINX-NEXT:    ret
1842;
1843; RV32I-LABEL: lrint_f32:
1844; RV32I:       # %bb.0:
1845; RV32I-NEXT:    addi sp, sp, -16
1846; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1847; RV32I-NEXT:    call lrintf
1848; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1849; RV32I-NEXT:    addi sp, sp, 16
1850; RV32I-NEXT:    ret
1851;
1852; RV64I-LABEL: lrint_f32:
1853; RV64I:       # %bb.0:
1854; RV64I-NEXT:    addi sp, sp, -16
1855; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1856; RV64I-NEXT:    call lrintf
1857; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1858; RV64I-NEXT:    addi sp, sp, 16
1859; RV64I-NEXT:    ret
; NOTE(review): with F/Zfinx, strict lrint selects a single fcvt.w.s (RV32) /
; fcvt.l.s (RV64) with no static rounding-mode operand, i.e. the dynamic
; rounding mode requested by !"round.dynamic"; soft-float targets call lrintf.
1860  %1 = call iXLen @llvm.experimental.constrained.lrint.iXLen.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1861  ret iXLen %1
1862}
1863
1864declare iXLen @llvm.experimental.constrained.lround.iXLen.f32(float, metadata)
1865
1866define iXLen @lround_f32(float %a) nounwind strictfp {
1867; RV32IF-LABEL: lround_f32:
1868; RV32IF:       # %bb.0:
1869; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
1870; RV32IF-NEXT:    ret
1871;
1872; RV64IF-LABEL: lround_f32:
1873; RV64IF:       # %bb.0:
1874; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
1875; RV64IF-NEXT:    ret
1876;
1877; RV32IZFINX-LABEL: lround_f32:
1878; RV32IZFINX:       # %bb.0:
1879; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
1880; RV32IZFINX-NEXT:    ret
1881;
1882; RV64IZFINX-LABEL: lround_f32:
1883; RV64IZFINX:       # %bb.0:
1884; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
1885; RV64IZFINX-NEXT:    ret
1886;
1887; RV32I-LABEL: lround_f32:
1888; RV32I:       # %bb.0:
1889; RV32I-NEXT:    addi sp, sp, -16
1890; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1891; RV32I-NEXT:    call lroundf
1892; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1893; RV32I-NEXT:    addi sp, sp, 16
1894; RV32I-NEXT:    ret
1895;
1896; RV64I-LABEL: lround_f32:
1897; RV64I:       # %bb.0:
1898; RV64I-NEXT:    addi sp, sp, -16
1899; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1900; RV64I-NEXT:    call lroundf
1901; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1902; RV64I-NEXT:    addi sp, sp, 16
1903; RV64I-NEXT:    ret
; NOTE(review): with F/Zfinx, strict lround selects fcvt.w.s/fcvt.l.s with the
; rmm static rounding-mode operand (round-to-nearest, ties away from zero);
; soft-float targets call lroundf.
1904  %1 = call iXLen @llvm.experimental.constrained.lround.iXLen.f32(float %a, metadata !"fpexcept.strict") strictfp
1905  ret iXLen %1
1906}
1907
1908declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
1909
1910define i64 @llrint_f32(float %a) nounwind strictfp {
1911; RV32IF-LABEL: llrint_f32:
1912; RV32IF:       # %bb.0:
1913; RV32IF-NEXT:    addi sp, sp, -16
1914; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1915; RV32IF-NEXT:    call llrintf
1916; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1917; RV32IF-NEXT:    addi sp, sp, 16
1918; RV32IF-NEXT:    ret
1919;
1920; RV64IF-LABEL: llrint_f32:
1921; RV64IF:       # %bb.0:
1922; RV64IF-NEXT:    fcvt.l.s a0, fa0
1923; RV64IF-NEXT:    ret
1924;
1925; RV32IZFINX-LABEL: llrint_f32:
1926; RV32IZFINX:       # %bb.0:
1927; RV32IZFINX-NEXT:    addi sp, sp, -16
1928; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1929; RV32IZFINX-NEXT:    call llrintf
1930; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1931; RV32IZFINX-NEXT:    addi sp, sp, 16
1932; RV32IZFINX-NEXT:    ret
1933;
1934; RV64IZFINX-LABEL: llrint_f32:
1935; RV64IZFINX:       # %bb.0:
1936; RV64IZFINX-NEXT:    fcvt.l.s a0, a0
1937; RV64IZFINX-NEXT:    ret
1938;
1939; RV32I-LABEL: llrint_f32:
1940; RV32I:       # %bb.0:
1941; RV32I-NEXT:    addi sp, sp, -16
1942; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1943; RV32I-NEXT:    call llrintf
1944; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1945; RV32I-NEXT:    addi sp, sp, 16
1946; RV32I-NEXT:    ret
1947;
1948; RV64I-LABEL: llrint_f32:
1949; RV64I:       # %bb.0:
1950; RV64I-NEXT:    addi sp, sp, -16
1951; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1952; RV64I-NEXT:    call llrintf
1953; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1954; RV64I-NEXT:    addi sp, sp, 16
1955; RV64I-NEXT:    ret
; NOTE(review): the i64 result only fits a single register on RV64, where
; F/Zfinx select fcvt.l.s (dynamic rounding); all RV32 configurations, and
; soft-float RV64, fall back to the llrintf libcall.
1956  %1 = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
1957  ret i64 %1
1958}
1959
1960declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
1961
1962define i64 @llround_f32(float %a) nounwind strictfp {
1963; RV32IF-LABEL: llround_f32:
1964; RV32IF:       # %bb.0:
1965; RV32IF-NEXT:    addi sp, sp, -16
1966; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1967; RV32IF-NEXT:    call llroundf
1968; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1969; RV32IF-NEXT:    addi sp, sp, 16
1970; RV32IF-NEXT:    ret
1971;
1972; RV64IF-LABEL: llround_f32:
1973; RV64IF:       # %bb.0:
1974; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
1975; RV64IF-NEXT:    ret
1976;
1977; RV32IZFINX-LABEL: llround_f32:
1978; RV32IZFINX:       # %bb.0:
1979; RV32IZFINX-NEXT:    addi sp, sp, -16
1980; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1981; RV32IZFINX-NEXT:    call llroundf
1982; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1983; RV32IZFINX-NEXT:    addi sp, sp, 16
1984; RV32IZFINX-NEXT:    ret
1985;
1986; RV64IZFINX-LABEL: llround_f32:
1987; RV64IZFINX:       # %bb.0:
1988; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
1989; RV64IZFINX-NEXT:    ret
1990;
1991; RV32I-LABEL: llround_f32:
1992; RV32I:       # %bb.0:
1993; RV32I-NEXT:    addi sp, sp, -16
1994; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1995; RV32I-NEXT:    call llroundf
1996; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1997; RV32I-NEXT:    addi sp, sp, 16
1998; RV32I-NEXT:    ret
1999;
2000; RV64I-LABEL: llround_f32:
2001; RV64I:       # %bb.0:
2002; RV64I-NEXT:    addi sp, sp, -16
2003; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
2004; RV64I-NEXT:    call llroundf
2005; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
2006; RV64I-NEXT:    addi sp, sp, 16
2007; RV64I-NEXT:    ret
; NOTE(review): the i64 result only fits a single register on RV64, where
; F/Zfinx select fcvt.l.s with the rmm static rounding mode; all RV32
; configurations, and soft-float RV64, fall back to the llroundf libcall.
2008  %1 = call i64 @llvm.experimental.constrained.llround.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
2009  ret i64 %1
2010}
2011
2012define float @ldexp_f32(float %x, i32 signext %y) nounwind {
2013; RV32IF-LABEL: ldexp_f32:
2014; RV32IF:       # %bb.0:
2015; RV32IF-NEXT:    addi sp, sp, -16
2016; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
2017; RV32IF-NEXT:    call ldexpf
2018; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
2019; RV32IF-NEXT:    addi sp, sp, 16
2020; RV32IF-NEXT:    ret
2021;
2022; RV64IF-LABEL: ldexp_f32:
2023; RV64IF:       # %bb.0:
2024; RV64IF-NEXT:    addi sp, sp, -16
2025; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
2026; RV64IF-NEXT:    call ldexpf
2027; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
2028; RV64IF-NEXT:    addi sp, sp, 16
2029; RV64IF-NEXT:    ret
2030;
2031; RV32IZFINX-LABEL: ldexp_f32:
2032; RV32IZFINX:       # %bb.0:
2033; RV32IZFINX-NEXT:    addi sp, sp, -16
2034; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
2035; RV32IZFINX-NEXT:    call ldexpf
2036; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
2037; RV32IZFINX-NEXT:    addi sp, sp, 16
2038; RV32IZFINX-NEXT:    ret
2039;
2040; RV64IZFINX-LABEL: ldexp_f32:
2041; RV64IZFINX:       # %bb.0:
2042; RV64IZFINX-NEXT:    addi sp, sp, -16
2043; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
2044; RV64IZFINX-NEXT:    call ldexpf
2045; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
2046; RV64IZFINX-NEXT:    addi sp, sp, 16
2047; RV64IZFINX-NEXT:    ret
2048;
2049; RV32I-LABEL: ldexp_f32:
2050; RV32I:       # %bb.0:
2051; RV32I-NEXT:    addi sp, sp, -16
2052; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
2053; RV32I-NEXT:    call ldexpf
2054; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
2055; RV32I-NEXT:    addi sp, sp, 16
2056; RV32I-NEXT:    ret
2057;
2058; RV64I-LABEL: ldexp_f32:
2059; RV64I:       # %bb.0:
2060; RV64I-NEXT:    addi sp, sp, -16
2061; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
2062; RV64I-NEXT:    call ldexpf
2063; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
2064; RV64I-NEXT:    addi sp, sp, 16
2065; RV64I-NEXT:    ret
; NOTE(review): every configuration lowers the strict ldexp to a ldexpf
; libcall, as the checks above show. Also note this function is declared
; nounwind only (no strictfp attribute on the define), unlike its siblings.
2066  %z = call float @llvm.experimental.constrained.ldexp.f32.i32(float %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
2067  ret float %z
2068}
2069