xref: /llvm-project/llvm/test/CodeGen/RISCV/double-round-conv.ll (revision 97982a8c605fac7c86d02e641a6cd7898b3ca343)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
4; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
5; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
6; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
7; RUN:   -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s
8; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
9; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
10
; floor tests: @llvm.floor.f64 followed by fptosi/fptoui. With the D
; extension (or zdinx), the intrinsic+convert pair is expected to fold
; into a single fcvt.*.d using the static "rdn" (round toward negative
; infinity) rounding mode; only the i64 cases on RV32 fall back to
; libcalls (floor, then __fixdfdi / __fixunsdfdi).
11define signext i8 @test_floor_si8(double %x) {
12; RV32IFD-LABEL: test_floor_si8:
13; RV32IFD:       # %bb.0:
14; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
15; RV32IFD-NEXT:    ret
16;
17; RV64IFD-LABEL: test_floor_si8:
18; RV64IFD:       # %bb.0:
19; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
20; RV64IFD-NEXT:    ret
21;
22; RV32IZFINXZDINX-LABEL: test_floor_si8:
23; RV32IZFINXZDINX:       # %bb.0:
24; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rdn
25; RV32IZFINXZDINX-NEXT:    ret
26;
27; RV64IZFINXZDINX-LABEL: test_floor_si8:
28; RV64IZFINXZDINX:       # %bb.0:
29; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
30; RV64IZFINXZDINX-NEXT:    ret
31  %a = call double @llvm.floor.f64(double %x)
32  %b = fptosi double %a to i8
33  ret i8 %b
34}
35
36define signext i16 @test_floor_si16(double %x) {
37; RV32IFD-LABEL: test_floor_si16:
38; RV32IFD:       # %bb.0:
39; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
40; RV32IFD-NEXT:    ret
41;
42; RV64IFD-LABEL: test_floor_si16:
43; RV64IFD:       # %bb.0:
44; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
45; RV64IFD-NEXT:    ret
46;
47; RV32IZFINXZDINX-LABEL: test_floor_si16:
48; RV32IZFINXZDINX:       # %bb.0:
49; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rdn
50; RV32IZFINXZDINX-NEXT:    ret
51;
52; RV64IZFINXZDINX-LABEL: test_floor_si16:
53; RV64IZFINXZDINX:       # %bb.0:
54; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
55; RV64IZFINXZDINX-NEXT:    ret
56  %a = call double @llvm.floor.f64(double %x)
57  %b = fptosi double %a to i16
58  ret i16 %b
59}
60
; i32: the two hard-float configs share a single check body; note RV64
; still selects the 32-bit fcvt.w.d form for an i32 result.
61define signext i32 @test_floor_si32(double %x) {
62; CHECKIFD-LABEL: test_floor_si32:
63; CHECKIFD:       # %bb.0:
64; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rdn
65; CHECKIFD-NEXT:    ret
66;
67; RV32IZFINXZDINX-LABEL: test_floor_si32:
68; RV32IZFINXZDINX:       # %bb.0:
69; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rdn
70; RV32IZFINXZDINX-NEXT:    ret
71;
72; RV64IZFINXZDINX-LABEL: test_floor_si32:
73; RV64IZFINXZDINX:       # %bb.0:
74; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rdn
75; RV64IZFINXZDINX-NEXT:    ret
76  %a = call double @llvm.floor.f64(double %x)
77  %b = fptosi double %a to i32
78  ret i32 %b
79}
80
; i64 on RV32 has no single-instruction conversion: libcalls are emitted
; (floor, then __fixdfdi) with the usual ra spill/reload and CFI notes.
81define i64 @test_floor_si64(double %x) {
82; RV32IFD-LABEL: test_floor_si64:
83; RV32IFD:       # %bb.0:
84; RV32IFD-NEXT:    addi sp, sp, -16
85; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
86; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
87; RV32IFD-NEXT:    .cfi_offset ra, -4
88; RV32IFD-NEXT:    call floor
89; RV32IFD-NEXT:    call __fixdfdi
90; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
91; RV32IFD-NEXT:    .cfi_restore ra
92; RV32IFD-NEXT:    addi sp, sp, 16
93; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
94; RV32IFD-NEXT:    ret
95;
96; RV64IFD-LABEL: test_floor_si64:
97; RV64IFD:       # %bb.0:
98; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
99; RV64IFD-NEXT:    ret
100;
101; RV32IZFINXZDINX-LABEL: test_floor_si64:
102; RV32IZFINXZDINX:       # %bb.0:
103; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
104; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
105; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
106; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
107; RV32IZFINXZDINX-NEXT:    call floor
108; RV32IZFINXZDINX-NEXT:    call __fixdfdi
109; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
110; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
111; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
112; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
113; RV32IZFINXZDINX-NEXT:    ret
114;
115; RV64IZFINXZDINX-LABEL: test_floor_si64:
116; RV64IZFINXZDINX:       # %bb.0:
117; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
118; RV64IZFINXZDINX-NEXT:    ret
119  %a = call double @llvm.floor.f64(double %x)
120  %b = fptosi double %a to i64
121  ret i64 %b
122}
123
124define zeroext i8 @test_floor_ui8(double %x) {
125; RV32IFD-LABEL: test_floor_ui8:
126; RV32IFD:       # %bb.0:
127; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
128; RV32IFD-NEXT:    ret
129;
130; RV64IFD-LABEL: test_floor_ui8:
131; RV64IFD:       # %bb.0:
132; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
133; RV64IFD-NEXT:    ret
134;
135; RV32IZFINXZDINX-LABEL: test_floor_ui8:
136; RV32IZFINXZDINX:       # %bb.0:
137; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rdn
138; RV32IZFINXZDINX-NEXT:    ret
139;
140; RV64IZFINXZDINX-LABEL: test_floor_ui8:
141; RV64IZFINXZDINX:       # %bb.0:
142; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
143; RV64IZFINXZDINX-NEXT:    ret
144  %a = call double @llvm.floor.f64(double %x)
145  %b = fptoui double %a to i8
146  ret i8 %b
147}
148
149define zeroext i16 @test_floor_ui16(double %x) {
150; RV32IFD-LABEL: test_floor_ui16:
151; RV32IFD:       # %bb.0:
152; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
153; RV32IFD-NEXT:    ret
154;
155; RV64IFD-LABEL: test_floor_ui16:
156; RV64IFD:       # %bb.0:
157; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
158; RV64IFD-NEXT:    ret
159;
160; RV32IZFINXZDINX-LABEL: test_floor_ui16:
161; RV32IZFINXZDINX:       # %bb.0:
162; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rdn
163; RV32IZFINXZDINX-NEXT:    ret
164;
165; RV64IZFINXZDINX-LABEL: test_floor_ui16:
166; RV64IZFINXZDINX:       # %bb.0:
167; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
168; RV64IZFINXZDINX-NEXT:    ret
169  %a = call double @llvm.floor.f64(double %x)
170  %b = fptoui double %a to i16
171  ret i16 %b
172}
173
174define signext i32 @test_floor_ui32(double %x) {
175; CHECKIFD-LABEL: test_floor_ui32:
176; CHECKIFD:       # %bb.0:
177; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rdn
178; CHECKIFD-NEXT:    ret
179;
180; RV32IZFINXZDINX-LABEL: test_floor_ui32:
181; RV32IZFINXZDINX:       # %bb.0:
182; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rdn
183; RV32IZFINXZDINX-NEXT:    ret
184;
185; RV64IZFINXZDINX-LABEL: test_floor_ui32:
186; RV64IZFINXZDINX:       # %bb.0:
187; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rdn
188; RV64IZFINXZDINX-NEXT:    ret
189  %a = call double @llvm.floor.f64(double %x)
190  %b = fptoui double %a to i32
191  ret i32 %b
192}
193
; Unsigned i64: same RV32 libcall shape as the signed case, but through
; __fixunsdfdi.
194define i64 @test_floor_ui64(double %x) {
195; RV32IFD-LABEL: test_floor_ui64:
196; RV32IFD:       # %bb.0:
197; RV32IFD-NEXT:    addi sp, sp, -16
198; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
199; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
200; RV32IFD-NEXT:    .cfi_offset ra, -4
201; RV32IFD-NEXT:    call floor
202; RV32IFD-NEXT:    call __fixunsdfdi
203; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
204; RV32IFD-NEXT:    .cfi_restore ra
205; RV32IFD-NEXT:    addi sp, sp, 16
206; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
207; RV32IFD-NEXT:    ret
208;
209; RV64IFD-LABEL: test_floor_ui64:
210; RV64IFD:       # %bb.0:
211; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
212; RV64IFD-NEXT:    ret
213;
214; RV32IZFINXZDINX-LABEL: test_floor_ui64:
215; RV32IZFINXZDINX:       # %bb.0:
216; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
217; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
218; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
219; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
220; RV32IZFINXZDINX-NEXT:    call floor
221; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
222; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
223; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
224; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
225; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
226; RV32IZFINXZDINX-NEXT:    ret
227;
228; RV64IZFINXZDINX-LABEL: test_floor_ui64:
229; RV64IZFINXZDINX:       # %bb.0:
230; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
231; RV64IZFINXZDINX-NEXT:    ret
232  %a = call double @llvm.floor.f64(double %x)
233  %b = fptoui double %a to i64
234  ret i64 %b
235}
236
; ceil tests: @llvm.ceil.f64 + fptosi/fptoui folds to one fcvt.*.d with
; the static "rup" (round toward positive infinity) rounding mode;
; i64 on RV32 again goes through ceil + __fixdfdi / __fixunsdfdi.
237define signext i8 @test_ceil_si8(double %x) {
238; RV32IFD-LABEL: test_ceil_si8:
239; RV32IFD:       # %bb.0:
240; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
241; RV32IFD-NEXT:    ret
242;
243; RV64IFD-LABEL: test_ceil_si8:
244; RV64IFD:       # %bb.0:
245; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
246; RV64IFD-NEXT:    ret
247;
248; RV32IZFINXZDINX-LABEL: test_ceil_si8:
249; RV32IZFINXZDINX:       # %bb.0:
250; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rup
251; RV32IZFINXZDINX-NEXT:    ret
252;
253; RV64IZFINXZDINX-LABEL: test_ceil_si8:
254; RV64IZFINXZDINX:       # %bb.0:
255; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
256; RV64IZFINXZDINX-NEXT:    ret
257  %a = call double @llvm.ceil.f64(double %x)
258  %b = fptosi double %a to i8
259  ret i8 %b
260}
261
262define signext i16 @test_ceil_si16(double %x) {
263; RV32IFD-LABEL: test_ceil_si16:
264; RV32IFD:       # %bb.0:
265; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
266; RV32IFD-NEXT:    ret
267;
268; RV64IFD-LABEL: test_ceil_si16:
269; RV64IFD:       # %bb.0:
270; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
271; RV64IFD-NEXT:    ret
272;
273; RV32IZFINXZDINX-LABEL: test_ceil_si16:
274; RV32IZFINXZDINX:       # %bb.0:
275; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rup
276; RV32IZFINXZDINX-NEXT:    ret
277;
278; RV64IZFINXZDINX-LABEL: test_ceil_si16:
279; RV64IZFINXZDINX:       # %bb.0:
280; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
281; RV64IZFINXZDINX-NEXT:    ret
282  %a = call double @llvm.ceil.f64(double %x)
283  %b = fptosi double %a to i16
284  ret i16 %b
285}
286
287define signext i32 @test_ceil_si32(double %x) {
288; CHECKIFD-LABEL: test_ceil_si32:
289; CHECKIFD:       # %bb.0:
290; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rup
291; CHECKIFD-NEXT:    ret
292;
293; RV32IZFINXZDINX-LABEL: test_ceil_si32:
294; RV32IZFINXZDINX:       # %bb.0:
295; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rup
296; RV32IZFINXZDINX-NEXT:    ret
297;
298; RV64IZFINXZDINX-LABEL: test_ceil_si32:
299; RV64IZFINXZDINX:       # %bb.0:
300; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rup
301; RV64IZFINXZDINX-NEXT:    ret
302  %a = call double @llvm.ceil.f64(double %x)
303  %b = fptosi double %a to i32
304  ret i32 %b
305}
306
; i64 on RV32: libcall pair (ceil, then __fixdfdi) with ra spill/CFI.
307define i64 @test_ceil_si64(double %x) {
308; RV32IFD-LABEL: test_ceil_si64:
309; RV32IFD:       # %bb.0:
310; RV32IFD-NEXT:    addi sp, sp, -16
311; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
312; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
313; RV32IFD-NEXT:    .cfi_offset ra, -4
314; RV32IFD-NEXT:    call ceil
315; RV32IFD-NEXT:    call __fixdfdi
316; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
317; RV32IFD-NEXT:    .cfi_restore ra
318; RV32IFD-NEXT:    addi sp, sp, 16
319; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
320; RV32IFD-NEXT:    ret
321;
322; RV64IFD-LABEL: test_ceil_si64:
323; RV64IFD:       # %bb.0:
324; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
325; RV64IFD-NEXT:    ret
326;
327; RV32IZFINXZDINX-LABEL: test_ceil_si64:
328; RV32IZFINXZDINX:       # %bb.0:
329; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
330; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
331; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
332; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
333; RV32IZFINXZDINX-NEXT:    call ceil
334; RV32IZFINXZDINX-NEXT:    call __fixdfdi
335; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
336; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
337; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
338; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
339; RV32IZFINXZDINX-NEXT:    ret
340;
341; RV64IZFINXZDINX-LABEL: test_ceil_si64:
342; RV64IZFINXZDINX:       # %bb.0:
343; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
344; RV64IZFINXZDINX-NEXT:    ret
345  %a = call double @llvm.ceil.f64(double %x)
346  %b = fptosi double %a to i64
347  ret i64 %b
348}
349
350define zeroext i8 @test_ceil_ui8(double %x) {
351; RV32IFD-LABEL: test_ceil_ui8:
352; RV32IFD:       # %bb.0:
353; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
354; RV32IFD-NEXT:    ret
355;
356; RV64IFD-LABEL: test_ceil_ui8:
357; RV64IFD:       # %bb.0:
358; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
359; RV64IFD-NEXT:    ret
360;
361; RV32IZFINXZDINX-LABEL: test_ceil_ui8:
362; RV32IZFINXZDINX:       # %bb.0:
363; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rup
364; RV32IZFINXZDINX-NEXT:    ret
365;
366; RV64IZFINXZDINX-LABEL: test_ceil_ui8:
367; RV64IZFINXZDINX:       # %bb.0:
368; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
369; RV64IZFINXZDINX-NEXT:    ret
370  %a = call double @llvm.ceil.f64(double %x)
371  %b = fptoui double %a to i8
372  ret i8 %b
373}
374
375define zeroext i16 @test_ceil_ui16(double %x) {
376; RV32IFD-LABEL: test_ceil_ui16:
377; RV32IFD:       # %bb.0:
378; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
379; RV32IFD-NEXT:    ret
380;
381; RV64IFD-LABEL: test_ceil_ui16:
382; RV64IFD:       # %bb.0:
383; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
384; RV64IFD-NEXT:    ret
385;
386; RV32IZFINXZDINX-LABEL: test_ceil_ui16:
387; RV32IZFINXZDINX:       # %bb.0:
388; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rup
389; RV32IZFINXZDINX-NEXT:    ret
390;
391; RV64IZFINXZDINX-LABEL: test_ceil_ui16:
392; RV64IZFINXZDINX:       # %bb.0:
393; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
394; RV64IZFINXZDINX-NEXT:    ret
395  %a = call double @llvm.ceil.f64(double %x)
396  %b = fptoui double %a to i16
397  ret i16 %b
398}
399
400define signext i32 @test_ceil_ui32(double %x) {
401; CHECKIFD-LABEL: test_ceil_ui32:
402; CHECKIFD:       # %bb.0:
403; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rup
404; CHECKIFD-NEXT:    ret
405;
406; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
407; RV32IZFINXZDINX:       # %bb.0:
408; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rup
409; RV32IZFINXZDINX-NEXT:    ret
410;
411; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
412; RV64IZFINXZDINX:       # %bb.0:
413; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rup
414; RV64IZFINXZDINX-NEXT:    ret
415  %a = call double @llvm.ceil.f64(double %x)
416  %b = fptoui double %a to i32
417  ret i32 %b
418}
419
; Unsigned i64 on RV32: ceil followed by __fixunsdfdi.
420define i64 @test_ceil_ui64(double %x) {
421; RV32IFD-LABEL: test_ceil_ui64:
422; RV32IFD:       # %bb.0:
423; RV32IFD-NEXT:    addi sp, sp, -16
424; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
425; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
426; RV32IFD-NEXT:    .cfi_offset ra, -4
427; RV32IFD-NEXT:    call ceil
428; RV32IFD-NEXT:    call __fixunsdfdi
429; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
430; RV32IFD-NEXT:    .cfi_restore ra
431; RV32IFD-NEXT:    addi sp, sp, 16
432; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
433; RV32IFD-NEXT:    ret
434;
435; RV64IFD-LABEL: test_ceil_ui64:
436; RV64IFD:       # %bb.0:
437; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
438; RV64IFD-NEXT:    ret
439;
440; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
441; RV32IZFINXZDINX:       # %bb.0:
442; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
443; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
444; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
445; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
446; RV32IZFINXZDINX-NEXT:    call ceil
447; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
448; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
449; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
450; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
451; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
452; RV32IZFINXZDINX-NEXT:    ret
453;
454; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
455; RV64IZFINXZDINX:       # %bb.0:
456; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
457; RV64IZFINXZDINX-NEXT:    ret
458  %a = call double @llvm.ceil.f64(double %x)
459  %b = fptoui double %a to i64
460  ret i64 %b
461}
462
; trunc tests: @llvm.trunc.f64 + fptosi/fptoui folds to one fcvt.*.d
; with the static "rtz" (round toward zero) rounding mode; i64 on RV32
; uses trunc + __fixdfdi / __fixunsdfdi libcalls.
463define signext i8 @test_trunc_si8(double %x) {
464; RV32IFD-LABEL: test_trunc_si8:
465; RV32IFD:       # %bb.0:
466; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
467; RV32IFD-NEXT:    ret
468;
469; RV64IFD-LABEL: test_trunc_si8:
470; RV64IFD:       # %bb.0:
471; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
472; RV64IFD-NEXT:    ret
473;
474; RV32IZFINXZDINX-LABEL: test_trunc_si8:
475; RV32IZFINXZDINX:       # %bb.0:
476; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
477; RV32IZFINXZDINX-NEXT:    ret
478;
479; RV64IZFINXZDINX-LABEL: test_trunc_si8:
480; RV64IZFINXZDINX:       # %bb.0:
481; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
482; RV64IZFINXZDINX-NEXT:    ret
483  %a = call double @llvm.trunc.f64(double %x)
484  %b = fptosi double %a to i8
485  ret i8 %b
486}
487
488define signext i16 @test_trunc_si16(double %x) {
489; RV32IFD-LABEL: test_trunc_si16:
490; RV32IFD:       # %bb.0:
491; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
492; RV32IFD-NEXT:    ret
493;
494; RV64IFD-LABEL: test_trunc_si16:
495; RV64IFD:       # %bb.0:
496; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
497; RV64IFD-NEXT:    ret
498;
499; RV32IZFINXZDINX-LABEL: test_trunc_si16:
500; RV32IZFINXZDINX:       # %bb.0:
501; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
502; RV32IZFINXZDINX-NEXT:    ret
503;
504; RV64IZFINXZDINX-LABEL: test_trunc_si16:
505; RV64IZFINXZDINX:       # %bb.0:
506; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
507; RV64IZFINXZDINX-NEXT:    ret
508  %a = call double @llvm.trunc.f64(double %x)
509  %b = fptosi double %a to i16
510  ret i16 %b
511}
512
513define signext i32 @test_trunc_si32(double %x) {
514; CHECKIFD-LABEL: test_trunc_si32:
515; CHECKIFD:       # %bb.0:
516; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
517; CHECKIFD-NEXT:    ret
518;
519; RV32IZFINXZDINX-LABEL: test_trunc_si32:
520; RV32IZFINXZDINX:       # %bb.0:
521; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
522; RV32IZFINXZDINX-NEXT:    ret
523;
524; RV64IZFINXZDINX-LABEL: test_trunc_si32:
525; RV64IZFINXZDINX:       # %bb.0:
526; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
527; RV64IZFINXZDINX-NEXT:    ret
528  %a = call double @llvm.trunc.f64(double %x)
529  %b = fptosi double %a to i32
530  ret i32 %b
531}
532
; i64 on RV32: libcall pair (trunc, then __fixdfdi) with ra spill/CFI.
533define i64 @test_trunc_si64(double %x) {
534; RV32IFD-LABEL: test_trunc_si64:
535; RV32IFD:       # %bb.0:
536; RV32IFD-NEXT:    addi sp, sp, -16
537; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
538; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
539; RV32IFD-NEXT:    .cfi_offset ra, -4
540; RV32IFD-NEXT:    call trunc
541; RV32IFD-NEXT:    call __fixdfdi
542; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
543; RV32IFD-NEXT:    .cfi_restore ra
544; RV32IFD-NEXT:    addi sp, sp, 16
545; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
546; RV32IFD-NEXT:    ret
547;
548; RV64IFD-LABEL: test_trunc_si64:
549; RV64IFD:       # %bb.0:
550; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
551; RV64IFD-NEXT:    ret
552;
553; RV32IZFINXZDINX-LABEL: test_trunc_si64:
554; RV32IZFINXZDINX:       # %bb.0:
555; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
556; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
557; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
558; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
559; RV32IZFINXZDINX-NEXT:    call trunc
560; RV32IZFINXZDINX-NEXT:    call __fixdfdi
561; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
562; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
563; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
564; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
565; RV32IZFINXZDINX-NEXT:    ret
566;
567; RV64IZFINXZDINX-LABEL: test_trunc_si64:
568; RV64IZFINXZDINX:       # %bb.0:
569; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
570; RV64IZFINXZDINX-NEXT:    ret
571  %a = call double @llvm.trunc.f64(double %x)
572  %b = fptosi double %a to i64
573  ret i64 %b
574}
575
576define zeroext i8 @test_trunc_ui8(double %x) {
577; RV32IFD-LABEL: test_trunc_ui8:
578; RV32IFD:       # %bb.0:
579; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
580; RV32IFD-NEXT:    ret
581;
582; RV64IFD-LABEL: test_trunc_ui8:
583; RV64IFD:       # %bb.0:
584; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
585; RV64IFD-NEXT:    ret
586;
587; RV32IZFINXZDINX-LABEL: test_trunc_ui8:
588; RV32IZFINXZDINX:       # %bb.0:
589; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
590; RV32IZFINXZDINX-NEXT:    ret
591;
592; RV64IZFINXZDINX-LABEL: test_trunc_ui8:
593; RV64IZFINXZDINX:       # %bb.0:
594; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
595; RV64IZFINXZDINX-NEXT:    ret
596  %a = call double @llvm.trunc.f64(double %x)
597  %b = fptoui double %a to i8
598  ret i8 %b
599}
600
601define zeroext i16 @test_trunc_ui16(double %x) {
602; RV32IFD-LABEL: test_trunc_ui16:
603; RV32IFD:       # %bb.0:
604; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
605; RV32IFD-NEXT:    ret
606;
607; RV64IFD-LABEL: test_trunc_ui16:
608; RV64IFD:       # %bb.0:
609; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
610; RV64IFD-NEXT:    ret
611;
612; RV32IZFINXZDINX-LABEL: test_trunc_ui16:
613; RV32IZFINXZDINX:       # %bb.0:
614; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
615; RV32IZFINXZDINX-NEXT:    ret
616;
617; RV64IZFINXZDINX-LABEL: test_trunc_ui16:
618; RV64IZFINXZDINX:       # %bb.0:
619; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
620; RV64IZFINXZDINX-NEXT:    ret
621  %a = call double @llvm.trunc.f64(double %x)
622  %b = fptoui double %a to i16
623  ret i16 %b
624}
625
626define signext i32 @test_trunc_ui32(double %x) {
627; CHECKIFD-LABEL: test_trunc_ui32:
628; CHECKIFD:       # %bb.0:
629; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
630; CHECKIFD-NEXT:    ret
631;
632; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
633; RV32IZFINXZDINX:       # %bb.0:
634; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
635; RV32IZFINXZDINX-NEXT:    ret
636;
637; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
638; RV64IZFINXZDINX:       # %bb.0:
639; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
640; RV64IZFINXZDINX-NEXT:    ret
641  %a = call double @llvm.trunc.f64(double %x)
642  %b = fptoui double %a to i32
643  ret i32 %b
644}
645
; Unsigned i64 on RV32: trunc followed by __fixunsdfdi.
646define i64 @test_trunc_ui64(double %x) {
647; RV32IFD-LABEL: test_trunc_ui64:
648; RV32IFD:       # %bb.0:
649; RV32IFD-NEXT:    addi sp, sp, -16
650; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
651; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
652; RV32IFD-NEXT:    .cfi_offset ra, -4
653; RV32IFD-NEXT:    call trunc
654; RV32IFD-NEXT:    call __fixunsdfdi
655; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
656; RV32IFD-NEXT:    .cfi_restore ra
657; RV32IFD-NEXT:    addi sp, sp, 16
658; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
659; RV32IFD-NEXT:    ret
660;
661; RV64IFD-LABEL: test_trunc_ui64:
662; RV64IFD:       # %bb.0:
663; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
664; RV64IFD-NEXT:    ret
665;
666; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
667; RV32IZFINXZDINX:       # %bb.0:
668; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
669; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
670; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
671; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
672; RV32IZFINXZDINX-NEXT:    call trunc
673; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
674; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
675; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
676; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
677; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
678; RV32IZFINXZDINX-NEXT:    ret
679;
680; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
681; RV64IZFINXZDINX:       # %bb.0:
682; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
683; RV64IZFINXZDINX-NEXT:    ret
684  %a = call double @llvm.trunc.f64(double %x)
685  %b = fptoui double %a to i64
686  ret i64 %b
687}
688
; round tests: @llvm.round.f64 + fptosi/fptoui folds to one fcvt.*.d
; with the static "rmm" (round to nearest, ties away from zero) mode;
; i64 on RV32 uses round + __fixdfdi / __fixunsdfdi libcalls.
689define signext i8 @test_round_si8(double %x) {
690; RV32IFD-LABEL: test_round_si8:
691; RV32IFD:       # %bb.0:
692; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
693; RV32IFD-NEXT:    ret
694;
695; RV64IFD-LABEL: test_round_si8:
696; RV64IFD:       # %bb.0:
697; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
698; RV64IFD-NEXT:    ret
699;
700; RV32IZFINXZDINX-LABEL: test_round_si8:
701; RV32IZFINXZDINX:       # %bb.0:
702; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
703; RV32IZFINXZDINX-NEXT:    ret
704;
705; RV64IZFINXZDINX-LABEL: test_round_si8:
706; RV64IZFINXZDINX:       # %bb.0:
707; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
708; RV64IZFINXZDINX-NEXT:    ret
709  %a = call double @llvm.round.f64(double %x)
710  %b = fptosi double %a to i8
711  ret i8 %b
712}
713
714define signext i16 @test_round_si16(double %x) {
715; RV32IFD-LABEL: test_round_si16:
716; RV32IFD:       # %bb.0:
717; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
718; RV32IFD-NEXT:    ret
719;
720; RV64IFD-LABEL: test_round_si16:
721; RV64IFD:       # %bb.0:
722; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
723; RV64IFD-NEXT:    ret
724;
725; RV32IZFINXZDINX-LABEL: test_round_si16:
726; RV32IZFINXZDINX:       # %bb.0:
727; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
728; RV32IZFINXZDINX-NEXT:    ret
729;
730; RV64IZFINXZDINX-LABEL: test_round_si16:
731; RV64IZFINXZDINX:       # %bb.0:
732; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
733; RV64IZFINXZDINX-NEXT:    ret
734  %a = call double @llvm.round.f64(double %x)
735  %b = fptosi double %a to i16
736  ret i16 %b
737}
738
739define signext i32 @test_round_si32(double %x) {
740; CHECKIFD-LABEL: test_round_si32:
741; CHECKIFD:       # %bb.0:
742; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
743; CHECKIFD-NEXT:    ret
744;
745; RV32IZFINXZDINX-LABEL: test_round_si32:
746; RV32IZFINXZDINX:       # %bb.0:
747; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
748; RV32IZFINXZDINX-NEXT:    ret
749;
750; RV64IZFINXZDINX-LABEL: test_round_si32:
751; RV64IZFINXZDINX:       # %bb.0:
752; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
753; RV64IZFINXZDINX-NEXT:    ret
754  %a = call double @llvm.round.f64(double %x)
755  %b = fptosi double %a to i32
756  ret i32 %b
757}
758
; i64 on RV32: libcall pair (round, then __fixdfdi) with ra spill/CFI.
759define i64 @test_round_si64(double %x) {
760; RV32IFD-LABEL: test_round_si64:
761; RV32IFD:       # %bb.0:
762; RV32IFD-NEXT:    addi sp, sp, -16
763; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
764; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
765; RV32IFD-NEXT:    .cfi_offset ra, -4
766; RV32IFD-NEXT:    call round
767; RV32IFD-NEXT:    call __fixdfdi
768; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
769; RV32IFD-NEXT:    .cfi_restore ra
770; RV32IFD-NEXT:    addi sp, sp, 16
771; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
772; RV32IFD-NEXT:    ret
773;
774; RV64IFD-LABEL: test_round_si64:
775; RV64IFD:       # %bb.0:
776; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
777; RV64IFD-NEXT:    ret
778;
779; RV32IZFINXZDINX-LABEL: test_round_si64:
780; RV32IZFINXZDINX:       # %bb.0:
781; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
782; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
783; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
784; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
785; RV32IZFINXZDINX-NEXT:    call round
786; RV32IZFINXZDINX-NEXT:    call __fixdfdi
787; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
788; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
789; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
790; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
791; RV32IZFINXZDINX-NEXT:    ret
792;
793; RV64IZFINXZDINX-LABEL: test_round_si64:
794; RV64IZFINXZDINX:       # %bb.0:
795; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
796; RV64IZFINXZDINX-NEXT:    ret
797  %a = call double @llvm.round.f64(double %x)
798  %b = fptosi double %a to i64
799  ret i64 %b
800}
801
802define zeroext i8 @test_round_ui8(double %x) {
803; RV32IFD-LABEL: test_round_ui8:
804; RV32IFD:       # %bb.0:
805; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
806; RV32IFD-NEXT:    ret
807;
808; RV64IFD-LABEL: test_round_ui8:
809; RV64IFD:       # %bb.0:
810; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
811; RV64IFD-NEXT:    ret
812;
813; RV32IZFINXZDINX-LABEL: test_round_ui8:
814; RV32IZFINXZDINX:       # %bb.0:
815; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rmm
816; RV32IZFINXZDINX-NEXT:    ret
817;
818; RV64IZFINXZDINX-LABEL: test_round_ui8:
819; RV64IZFINXZDINX:       # %bb.0:
820; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
821; RV64IZFINXZDINX-NEXT:    ret
822  %a = call double @llvm.round.f64(double %x)
823  %b = fptoui double %a to i8
824  ret i8 %b
825}
826
827define zeroext i16 @test_round_ui16(double %x) {
828; RV32IFD-LABEL: test_round_ui16:
829; RV32IFD:       # %bb.0:
830; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
831; RV32IFD-NEXT:    ret
832;
833; RV64IFD-LABEL: test_round_ui16:
834; RV64IFD:       # %bb.0:
835; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
836; RV64IFD-NEXT:    ret
837;
838; RV32IZFINXZDINX-LABEL: test_round_ui16:
839; RV32IZFINXZDINX:       # %bb.0:
840; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rmm
841; RV32IZFINXZDINX-NEXT:    ret
842;
843; RV64IZFINXZDINX-LABEL: test_round_ui16:
844; RV64IZFINXZDINX:       # %bb.0:
845; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
846; RV64IZFINXZDINX-NEXT:    ret
847  %a = call double @llvm.round.f64(double %x)
848  %b = fptoui double %a to i16
849  ret i16 %b
850}
851
852define signext i32 @test_round_ui32(double %x) {
853; CHECKIFD-LABEL: test_round_ui32:
854; CHECKIFD:       # %bb.0:
855; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rmm
856; CHECKIFD-NEXT:    ret
857;
858; RV32IZFINXZDINX-LABEL: test_round_ui32:
859; RV32IZFINXZDINX:       # %bb.0:
860; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rmm
861; RV32IZFINXZDINX-NEXT:    ret
862;
863; RV64IZFINXZDINX-LABEL: test_round_ui32:
864; RV64IZFINXZDINX:       # %bb.0:
865; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rmm
866; RV64IZFINXZDINX-NEXT:    ret
867  %a = call double @llvm.round.f64(double %x)
868  %b = fptoui double %a to i32
869  ret i32 %b
870}
871
; Unsigned i64 on RV32: round followed by __fixunsdfdi.
872define i64 @test_round_ui64(double %x) {
873; RV32IFD-LABEL: test_round_ui64:
874; RV32IFD:       # %bb.0:
875; RV32IFD-NEXT:    addi sp, sp, -16
876; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
877; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
878; RV32IFD-NEXT:    .cfi_offset ra, -4
879; RV32IFD-NEXT:    call round
880; RV32IFD-NEXT:    call __fixunsdfdi
881; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
882; RV32IFD-NEXT:    .cfi_restore ra
883; RV32IFD-NEXT:    addi sp, sp, 16
884; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
885; RV32IFD-NEXT:    ret
886;
887; RV64IFD-LABEL: test_round_ui64:
888; RV64IFD:       # %bb.0:
889; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
890; RV64IFD-NEXT:    ret
891;
892; RV32IZFINXZDINX-LABEL: test_round_ui64:
893; RV32IZFINXZDINX:       # %bb.0:
894; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
895; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
896; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
897; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
898; RV32IZFINXZDINX-NEXT:    call round
899; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
900; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
901; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
902; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
903; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
904; RV32IZFINXZDINX-NEXT:    ret
905;
906; RV64IZFINXZDINX-LABEL: test_round_ui64:
907; RV64IZFINXZDINX:       # %bb.0:
908; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
909; RV64IZFINXZDINX-NEXT:    ret
910  %a = call double @llvm.round.f64(double %x)
911  %b = fptoui double %a to i64
912  ret i64 %b
913}
914
define signext i8 @test_roundeven_si8(double %x) {
; RV32IFD-LABEL: test_roundeven_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si8:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si8:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; roundeven followed by a narrowing fptosi folds to a single fcvt using the
  ; rne (round-to-nearest-even) static rounding mode on all four configurations.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
939
define signext i16 @test_roundeven_si16(double %x) {
; RV32IFD-LABEL: test_roundeven_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si16:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si16:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; Same fold as the i8 case: one fcvt with rne, no separate rounding call.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
964
define signext i32 @test_roundeven_si32(double %x) {
; CHECKIFD-LABEL: test_roundeven_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rne
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; i32 result: fcvt.w.d with rne on both RV32 and RV64 (shared CHECKIFD prefix).
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
984
define i64 @test_roundeven_si64(double %x) {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; i64 result: RV32 has no 64-bit fcvt, so it calls libm roundeven then
  ; __fixdfdi; RV64 still folds to a single fcvt.l.d with rne.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
1027
define zeroext i8 @test_roundeven_ui8(double %x) {
; RV32IFD-LABEL: test_roundeven_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui8:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui8:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; Unsigned variant: same single-fcvt fold, using the .wu/.lu conversions.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
1052
define zeroext i16 @test_roundeven_ui16(double %x) {
; RV32IFD-LABEL: test_roundeven_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui16:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui16:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; Same unsigned fold as ui8: one fcvt.wu.d/fcvt.lu.d with rne.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
1077
define signext i32 @test_roundeven_ui32(double %x) {
; CHECKIFD-LABEL: test_roundeven_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rne
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rne
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; ui32 result (signext per the RISC-V ABI's i32 register convention):
  ; single fcvt.wu.d with rne on both RV32 and RV64.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
1097
define i64 @test_roundeven_ui64(double %x) {
; RV32IFD-LABEL: test_roundeven_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
; RV64IZFINXZDINX-NEXT:    ret
  ; ui64 result: RV32 calls libm roundeven then __fixunsdfdi; RV64 folds to a
  ; single fcvt.lu.d with rne.
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
1140
define double @test_floor_double(double %x) {
; RV32IFD-LABEL: test_floor_double:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    tail floor
;
; RV64IFD-LABEL: test_floor_double:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a0, %hi(.LCPI40_0)
; RV64IFD-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
; RV64IFD-NEXT:    fabs.d fa4, fa0
; RV64IFD-NEXT:    flt.d a0, fa4, fa5
; RV64IFD-NEXT:    beqz a0, .LBB40_2
; RV64IFD-NEXT:  # %bb.1:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rdn
; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT:  .LBB40_2:
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_floor_double:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call floor
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_floor_double:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    li a1, 1075
; RV64IZFINXZDINX-NEXT:    slli a1, a1, 52
; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB40_2
; RV64IZFINXZDINX-NEXT:  # %bb.1:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rdn
; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:  .LBB40_2:
; RV64IZFINXZDINX-NEXT:    ret
  ; double->double floor: RV32IFD tail-calls libm floor; RV64 expands inline,
  ; guarding with |x| < threshold (constant-pool load, or li 1075 + slli 52 for
  ; Zdinx) then rounding via fcvt.l.d/fcvt.d.l rdn and fsgnj.d to keep the sign.
  ; RV32 Zdinx falls back to a regular libcall.
  %a = call double @llvm.floor.f64(double %x)
  ret double %a
}
1189
define double @test_ceil_double(double %x) {
; RV32IFD-LABEL: test_ceil_double:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    tail ceil
;
; RV64IFD-LABEL: test_ceil_double:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a0, %hi(.LCPI41_0)
; RV64IFD-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
; RV64IFD-NEXT:    fabs.d fa4, fa0
; RV64IFD-NEXT:    flt.d a0, fa4, fa5
; RV64IFD-NEXT:    beqz a0, .LBB41_2
; RV64IFD-NEXT:  # %bb.1:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rup
; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT:  .LBB41_2:
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_ceil_double:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call ceil
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_double:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    li a1, 1075
; RV64IZFINXZDINX-NEXT:    slli a1, a1, 52
; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB41_2
; RV64IZFINXZDINX-NEXT:  # %bb.1:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rup
; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:  .LBB41_2:
; RV64IZFINXZDINX-NEXT:    ret
  ; Same expansion shape as test_floor_double but with the rup (round-up)
  ; static rounding mode; RV32IFD tail-calls libm ceil.
  %a = call double @llvm.ceil.f64(double %x)
  ret double %a
}
1238
define double @test_trunc_double(double %x) {
; RV32IFD-LABEL: test_trunc_double:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    tail trunc
;
; RV64IFD-LABEL: test_trunc_double:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a0, %hi(.LCPI42_0)
; RV64IFD-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
; RV64IFD-NEXT:    fabs.d fa4, fa0
; RV64IFD-NEXT:    flt.d a0, fa4, fa5
; RV64IFD-NEXT:    beqz a0, .LBB42_2
; RV64IFD-NEXT:  # %bb.1:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rtz
; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT:  .LBB42_2:
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_trunc_double:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call trunc
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_double:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    li a1, 1075
; RV64IZFINXZDINX-NEXT:    slli a1, a1, 52
; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB42_2
; RV64IZFINXZDINX-NEXT:  # %bb.1:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rtz
; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:  .LBB42_2:
; RV64IZFINXZDINX-NEXT:    ret
  ; Same expansion shape as test_floor_double but with the rtz
  ; (round-toward-zero) static rounding mode; RV32IFD tail-calls libm trunc.
  %a = call double @llvm.trunc.f64(double %x)
  ret double %a
}
1287
define double @test_round_double(double %x) {
; RV32IFD-LABEL: test_round_double:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    tail round
;
; RV64IFD-LABEL: test_round_double:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a0, %hi(.LCPI43_0)
; RV64IFD-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
; RV64IFD-NEXT:    fabs.d fa4, fa0
; RV64IFD-NEXT:    flt.d a0, fa4, fa5
; RV64IFD-NEXT:    beqz a0, .LBB43_2
; RV64IFD-NEXT:  # %bb.1:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rmm
; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT:  .LBB43_2:
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_double:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_double:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    li a1, 1075
; RV64IZFINXZDINX-NEXT:    slli a1, a1, 52
; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB43_2
; RV64IZFINXZDINX-NEXT:  # %bb.1:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rmm
; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:  .LBB43_2:
; RV64IZFINXZDINX-NEXT:    ret
  ; Same expansion shape as test_floor_double but with the rmm
  ; (round-to-nearest, ties-to-max-magnitude) static rounding mode;
  ; RV32IFD tail-calls libm round.
  %a = call double @llvm.round.f64(double %x)
  ret double %a
}
1336
define double @test_roundeven_double(double %x) {
; RV32IFD-LABEL: test_roundeven_double:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    tail roundeven
;
; RV64IFD-LABEL: test_roundeven_double:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a0, %hi(.LCPI44_0)
; RV64IFD-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
; RV64IFD-NEXT:    fabs.d fa4, fa0
; RV64IFD-NEXT:    flt.d a0, fa4, fa5
; RV64IFD-NEXT:    beqz a0, .LBB44_2
; RV64IFD-NEXT:  # %bb.1:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    fcvt.d.l fa5, a0, rne
; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT:  .LBB44_2:
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_double:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    .cfi_offset ra, -4
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    .cfi_restore ra
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_double:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    li a1, 1075
; RV64IZFINXZDINX-NEXT:    slli a1, a1, 52
; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB44_2
; RV64IZFINXZDINX-NEXT:  # %bb.1:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rne
; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:  .LBB44_2:
; RV64IZFINXZDINX-NEXT:    ret
  ; Same expansion shape as test_floor_double but with the rne
  ; (round-to-nearest-even) static rounding mode; RV32IFD tail-calls
  ; libm roundeven.
  %a = call double @llvm.roundeven.f64(double %x)
  ret double %a
}
1385
; Declarations of the f64 rounding intrinsics exercised by the tests above.
declare double @llvm.floor.f64(double)
declare double @llvm.ceil.f64(double)
declare double @llvm.trunc.f64(double)
declare double @llvm.round.f64(double)
declare double @llvm.roundeven.f64(double)
1391