; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINX %s

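; These tests check that a rounding intrinsic (llvm.floor, llvm.ceil,
; llvm.trunc, llvm.round, llvm.roundeven) followed by a conversion to an
; integer type is folded into a single fcvt instruction with a static
; rounding mode. Conversions to i64 on RV32 go through the __fixsfdi /
; __fixunssfdi libcalls, so the rounding is materialized explicitly first.
;
; The first group covers llvm.floor.f32 (rdn rounding mode) converted to
; signed and unsigned integers of various widths.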
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_si8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_si8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_si16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_si16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_si32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_si32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB3_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB3_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_si64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB3_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB3_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_si64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_ui8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_ui8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_ui16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_ui16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_ui32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rdn
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_ui32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.wu.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB7_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB7_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_ui64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB7_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB7_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_ui64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rdn
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

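; llvm.ceil.f32 converted to integers, using the rup (round towards
; +infinity) rounding mode.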
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_si8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_si8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_si16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_si16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_si32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_si32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB11_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB11_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_si64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB11_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB11_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_si64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_ui8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_ui8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_ui16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_ui16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_ui32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rup
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_ui32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.wu.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB15_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB15_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_ui64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB15_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB15_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_ui64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rup
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

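; llvm.trunc.f32 converted to integers, using the rtz (round towards zero)
; rounding mode.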
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_si8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_si8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_si16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_si16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_si32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_si32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB19_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB19_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_si64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB19_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB19_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_si64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_ui8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_ui8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_ui16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_ui16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_ui32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_ui32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB23_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB23_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_ui64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB23_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB23_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_ui64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

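; llvm.round.f32 converted to integers, using the rmm (round to nearest,
; ties away from zero) rounding mode.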
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_si8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_si8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_si16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_si16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_si32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_si32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB27_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB27_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_si64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB27_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB27_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_si64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_ui8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_ui8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_ui16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_ui16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_ui32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rmm
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_ui32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.wu.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB31_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB31_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_ui64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB31_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB31_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_ui64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rmm
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

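; llvm.roundeven.f32 converted to integers, using the rne (round to nearest,
; ties to even) rounding mode.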
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_si8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_si8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_si16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_si16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_si32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.w.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_si32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.w.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB35_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB35_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_si64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB35_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB35_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_si64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_ui8:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_ui8:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_ui16:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_ui16:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_ui32:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    fcvt.wu.s a0, a0, rne
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_ui32:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.wu.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB39_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB39_2:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    .cfi_restore ra
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    .cfi_def_cfa_offset 0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_ui64:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB39_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB39_2:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    .cfi_restore ra
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_ui64:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rne
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

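; The remaining tests return the rounded float directly. Inputs whose
; magnitude is at least 2^23 (the constant built with lui 307200, i.e.
; 0x4b000000) are already integral and are passed through unchanged.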
define float @test_floor_float(float %x) {
; RV32IFD-LABEL: test_floor_float:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call floor@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_float:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call floor@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV32IF-LABEL: test_floor_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB40_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB40_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB40_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rdn
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB40_2:
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_floor_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB40_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB40_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_floor_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB40_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rdn
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rdn
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB40_2:
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  ret float %a
}

define float @test_ceil_float(float %x) {
; RV32IFD-LABEL: test_ceil_float:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call ceil@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_float:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call ceil@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV32IF-LABEL: test_ceil_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB41_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB41_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB41_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rup
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB41_2:
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_ceil_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB41_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB41_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_ceil_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB41_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rup
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rup
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB41_2:
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  ret float %a
}

define float @test_trunc_float(float %x) {
; RV32IFD-LABEL: test_trunc_float:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call trunc@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_float:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call trunc@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV32IF-LABEL: test_trunc_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB42_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB42_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB42_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rtz
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB42_2:
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_trunc_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB42_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB42_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_trunc_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB42_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rtz
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rtz
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB42_2:
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  ret float %a
}

define float @test_round_float(float %x) {
; RV32IFD-LABEL: test_round_float:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call round@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_float:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call round@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV32IF-LABEL: test_round_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB43_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB43_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB43_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rmm
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB43_2:
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_round_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB43_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB43_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_round_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB43_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rmm
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rmm
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB43_2:
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  ret float %a
}

define float @test_roundeven_float(float %x) {
; RV32IFD-LABEL: test_roundeven_float:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call roundeven@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_float:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call roundeven@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
; RV32IF-LABEL: test_roundeven_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lui a0, 307200
; RV32IF-NEXT:    fmv.w.x fa5, a0
; RV32IF-NEXT:    fabs.s fa4, fa0
; RV32IF-NEXT:    flt.s a0, fa4, fa5
; RV32IF-NEXT:    beqz a0, .LBB44_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV32IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT:  .LBB44_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lui a0, 307200
; RV64IF-NEXT:    fmv.w.x fa5, a0
; RV64IF-NEXT:    fabs.s fa4, fa0
; RV64IF-NEXT:    flt.s a0, fa4, fa5
; RV64IF-NEXT:    beqz a0, .LBB44_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV64IF-NEXT:    fcvt.s.w fa5, a0, rne
; RV64IF-NEXT:    fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT:  .LBB44_2:
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: test_roundeven_float:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lui a1, 307200
; RV32IZFINX-NEXT:    fabs.s a2, a0
; RV32IZFINX-NEXT:    flt.s a1, a2, a1
; RV32IZFINX-NEXT:    beqz a1, .LBB44_2
; RV32IZFINX-NEXT:  # %bb.1:
; RV32IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV32IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV32IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV32IZFINX-NEXT:  .LBB44_2:
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: test_roundeven_float:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lui a1, 307200
; RV64IZFINX-NEXT:    fabs.s a2, a0
; RV64IZFINX-NEXT:    flt.s a1, a2, a1
; RV64IZFINX-NEXT:    beqz a1, .LBB44_2
; RV64IZFINX-NEXT:  # %bb.1:
; RV64IZFINX-NEXT:    fcvt.w.s a1, a0, rne
; RV64IZFINX-NEXT:    fcvt.s.w a1, a1, rne
; RV64IZFINX-NEXT:    fsgnj.s a0, a1, a0
; RV64IZFINX-NEXT:  .LBB44_2:
; RV64IZFINX-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  ret float %a
}

declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)
