xref: /llvm-project/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
4; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
5; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
6; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
7; RUN:   -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s
8; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
9; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
10
; floor + @llvm.fptosi.sat.i32: folds into a single fcvt.w.d with static
; rounding mode rdn (round-down), in both the FPR (D) and GPR (Zdinx) forms.
; The feq.d/seqz/addi/and tail forces the result to 0 when the input is NaN
; (feq.d x, x yields 0 for NaN), matching fptosi.sat semantics.
11define signext i32 @test_floor_si32(double %x) {
12; CHECKIFD-LABEL: test_floor_si32:
13; CHECKIFD:       # %bb.0:
14; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rdn
15; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
16; CHECKIFD-NEXT:    seqz a1, a1
17; CHECKIFD-NEXT:    addi a1, a1, -1
18; CHECKIFD-NEXT:    and a0, a1, a0
19; CHECKIFD-NEXT:    ret
20;
21; RV32IZFINXZDINX-LABEL: test_floor_si32:
22; RV32IZFINXZDINX:       # %bb.0:
23; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rdn
24; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
25; RV32IZFINXZDINX-NEXT:    seqz a0, a0
26; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
27; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
28; RV32IZFINXZDINX-NEXT:    ret
29;
30; RV64IZFINXZDINX-LABEL: test_floor_si32:
31; RV64IZFINXZDINX:       # %bb.0:
32; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rdn
33; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
34; RV64IZFINXZDINX-NEXT:    seqz a0, a0
35; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
36; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
37; RV64IZFINXZDINX-NEXT:    ret
38  %a = call double @llvm.floor.f64(double %x)
39  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
40  ret i32 %b
41}
42
; floor + @llvm.fptosi.sat.i64. On RV64 this is a single fcvt.l.d with rdn
; plus the NaN mask. On RV32 there is no 64-bit convert instruction, so the
; lowering calls the floor libcall, then __fixdfdi, and saturates manually:
; fle.d/flt.d against i64 min/max bounds loaded from the constant pool
; (.LCPI1_0/.LCPI1_1), selecting INT64_MIN/INT64_MAX on out-of-range input,
; and feq.d masking the whole result to 0 on NaN.
43define i64 @test_floor_si64(double %x) nounwind {
44; RV32IFD-LABEL: test_floor_si64:
45; RV32IFD:       # %bb.0:
46; RV32IFD-NEXT:    addi sp, sp, -16
47; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
48; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
49; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
50; RV32IFD-NEXT:    call floor
51; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
52; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
53; RV32IFD-NEXT:    fmv.d fs0, fa0
54; RV32IFD-NEXT:    fle.d s0, fa5, fa0
55; RV32IFD-NEXT:    call __fixdfdi
56; RV32IFD-NEXT:    lui a3, 524288
57; RV32IFD-NEXT:    lui a2, 524288
58; RV32IFD-NEXT:    beqz s0, .LBB1_2
59; RV32IFD-NEXT:  # %bb.1:
60; RV32IFD-NEXT:    mv a2, a1
61; RV32IFD-NEXT:  .LBB1_2:
62; RV32IFD-NEXT:    lui a1, %hi(.LCPI1_1)
63; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_1)(a1)
64; RV32IFD-NEXT:    flt.d a1, fa5, fs0
65; RV32IFD-NEXT:    beqz a1, .LBB1_4
66; RV32IFD-NEXT:  # %bb.3:
67; RV32IFD-NEXT:    addi a2, a3, -1
68; RV32IFD-NEXT:  .LBB1_4:
69; RV32IFD-NEXT:    feq.d a3, fs0, fs0
70; RV32IFD-NEXT:    neg a4, a1
71; RV32IFD-NEXT:    neg a1, s0
72; RV32IFD-NEXT:    neg a3, a3
73; RV32IFD-NEXT:    and a0, a1, a0
74; RV32IFD-NEXT:    and a1, a3, a2
75; RV32IFD-NEXT:    or a0, a4, a0
76; RV32IFD-NEXT:    and a0, a3, a0
77; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
78; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
79; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
80; RV32IFD-NEXT:    addi sp, sp, 16
81; RV32IFD-NEXT:    ret
82;
83; RV64IFD-LABEL: test_floor_si64:
84; RV64IFD:       # %bb.0:
85; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
86; RV64IFD-NEXT:    feq.d a1, fa0, fa0
87; RV64IFD-NEXT:    seqz a1, a1
88; RV64IFD-NEXT:    addi a1, a1, -1
89; RV64IFD-NEXT:    and a0, a1, a0
90; RV64IFD-NEXT:    ret
91;
92; RV32IZFINXZDINX-LABEL: test_floor_si64:
93; RV32IZFINXZDINX:       # %bb.0:
94; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
95; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
96; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
97; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
98; RV32IZFINXZDINX-NEXT:    call floor
99; RV32IZFINXZDINX-NEXT:    mv s0, a0
100; RV32IZFINXZDINX-NEXT:    mv s1, a1
101; RV32IZFINXZDINX-NEXT:    call __fixdfdi
102; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI1_0)
103; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI1_1)
104; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI1_0)(a2)
105; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI1_0+4)(a2)
106; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI1_1)(a3)
107; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI1_1+4)(a3)
108; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
109; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
110; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
111; RV32IZFINXZDINX-NEXT:    lui a4, 524288
112; RV32IZFINXZDINX-NEXT:    neg a2, a2
113; RV32IZFINXZDINX-NEXT:    neg a5, a6
114; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
115; RV32IZFINXZDINX-NEXT:    neg a5, a3
116; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
117; RV32IZFINXZDINX-NEXT:    lui a5, 524288
118; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB1_2
119; RV32IZFINXZDINX-NEXT:  # %bb.1:
120; RV32IZFINXZDINX-NEXT:    mv a5, a1
121; RV32IZFINXZDINX-NEXT:  .LBB1_2:
122; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
123; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB1_4
124; RV32IZFINXZDINX-NEXT:  # %bb.3:
125; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
126; RV32IZFINXZDINX-NEXT:  .LBB1_4:
127; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
128; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
129; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
130; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
131; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
132; RV32IZFINXZDINX-NEXT:    ret
133;
134; RV64IZFINXZDINX-LABEL: test_floor_si64:
135; RV64IZFINXZDINX:       # %bb.0:
136; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
137; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
138; RV64IZFINXZDINX-NEXT:    seqz a0, a0
139; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
140; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
141; RV64IZFINXZDINX-NEXT:    ret
142  %a = call double @llvm.floor.f64(double %x)
143  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
144  ret i64 %b
145}
146
; floor + @llvm.fptoui.sat.i32: same single-instruction fold as the signed
; case, but using the unsigned convert fcvt.wu.d with rdn. The
; feq.d/seqz/addi/and tail zeroes the result for NaN input.
147define signext i32 @test_floor_ui32(double %x) {
148; CHECKIFD-LABEL: test_floor_ui32:
149; CHECKIFD:       # %bb.0:
150; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rdn
151; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
152; CHECKIFD-NEXT:    seqz a1, a1
153; CHECKIFD-NEXT:    addi a1, a1, -1
154; CHECKIFD-NEXT:    and a0, a1, a0
155; CHECKIFD-NEXT:    ret
156;
157; RV32IZFINXZDINX-LABEL: test_floor_ui32:
158; RV32IZFINXZDINX:       # %bb.0:
159; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rdn
160; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
161; RV32IZFINXZDINX-NEXT:    seqz a0, a0
162; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
163; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
164; RV32IZFINXZDINX-NEXT:    ret
165;
166; RV64IZFINXZDINX-LABEL: test_floor_ui32:
167; RV64IZFINXZDINX:       # %bb.0:
168; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rdn
169; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
170; RV64IZFINXZDINX-NEXT:    seqz a0, a0
171; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
172; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
173; RV64IZFINXZDINX-NEXT:    ret
174  %a = call double @llvm.floor.f64(double %x)
175  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
176  ret i32 %b
177}
178
; floor + @llvm.fptoui.sat.i64. RV64 uses a single fcvt.lu.d with rdn plus
; the NaN mask. RV32 calls floor then __fixunsdfdi and saturates by hand:
; fle.d against +0.0 (fcvt.d.w of zero) masks negative/NaN input to 0, and
; flt.d against the upper bound from the constant pool (.LCPI3_0) ORs in
; all-ones (UINT64_MAX) for overflowing input.
179define i64 @test_floor_ui64(double %x) nounwind {
180; RV32IFD-LABEL: test_floor_ui64:
181; RV32IFD:       # %bb.0:
182; RV32IFD-NEXT:    addi sp, sp, -16
183; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
184; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
185; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
186; RV32IFD-NEXT:    call floor
187; RV32IFD-NEXT:    lui a0, %hi(.LCPI3_0)
188; RV32IFD-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
189; RV32IFD-NEXT:    fcvt.d.w fa4, zero
190; RV32IFD-NEXT:    fle.d a0, fa4, fa0
191; RV32IFD-NEXT:    flt.d a1, fa5, fa0
192; RV32IFD-NEXT:    neg s0, a1
193; RV32IFD-NEXT:    neg s1, a0
194; RV32IFD-NEXT:    call __fixunsdfdi
195; RV32IFD-NEXT:    and a0, s1, a0
196; RV32IFD-NEXT:    and a1, s1, a1
197; RV32IFD-NEXT:    or a0, s0, a0
198; RV32IFD-NEXT:    or a1, s0, a1
199; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
200; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
201; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
202; RV32IFD-NEXT:    addi sp, sp, 16
203; RV32IFD-NEXT:    ret
204;
205; RV64IFD-LABEL: test_floor_ui64:
206; RV64IFD:       # %bb.0:
207; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
208; RV64IFD-NEXT:    feq.d a1, fa0, fa0
209; RV64IFD-NEXT:    seqz a1, a1
210; RV64IFD-NEXT:    addi a1, a1, -1
211; RV64IFD-NEXT:    and a0, a1, a0
212; RV64IFD-NEXT:    ret
213;
214; RV32IZFINXZDINX-LABEL: test_floor_ui64:
215; RV32IZFINXZDINX:       # %bb.0:
216; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
217; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
218; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
219; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
220; RV32IZFINXZDINX-NEXT:    call floor
221; RV32IZFINXZDINX-NEXT:    mv s0, a0
222; RV32IZFINXZDINX-NEXT:    mv s1, a1
223; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
224; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
225; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI3_0)
226; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
227; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI3_0+4)(a4)
228; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI3_0)(a4)
229; RV32IZFINXZDINX-NEXT:    neg a2, a2
230; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
231; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
232; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
233; RV32IZFINXZDINX-NEXT:    neg a2, a2
234; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
235; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
236; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
237; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
238; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
239; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
240; RV32IZFINXZDINX-NEXT:    ret
241;
242; RV64IZFINXZDINX-LABEL: test_floor_ui64:
243; RV64IZFINXZDINX:       # %bb.0:
244; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rdn
245; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
246; RV64IZFINXZDINX-NEXT:    seqz a0, a0
247; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
248; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
249; RV64IZFINXZDINX-NEXT:    ret
250  %a = call double @llvm.floor.f64(double %x)
251  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
252  ret i64 %b
253}
254
; ceil + @llvm.fptosi.sat.i32: folds into a single fcvt.w.d with static
; rounding mode rup (round-up), plus the feq.d-based NaN-to-zero mask.
255define signext i32 @test_ceil_si32(double %x) {
256; CHECKIFD-LABEL: test_ceil_si32:
257; CHECKIFD:       # %bb.0:
258; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rup
259; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
260; CHECKIFD-NEXT:    seqz a1, a1
261; CHECKIFD-NEXT:    addi a1, a1, -1
262; CHECKIFD-NEXT:    and a0, a1, a0
263; CHECKIFD-NEXT:    ret
264;
265; RV32IZFINXZDINX-LABEL: test_ceil_si32:
266; RV32IZFINXZDINX:       # %bb.0:
267; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rup
268; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
269; RV32IZFINXZDINX-NEXT:    seqz a0, a0
270; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
271; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
272; RV32IZFINXZDINX-NEXT:    ret
273;
274; RV64IZFINXZDINX-LABEL: test_ceil_si32:
275; RV64IZFINXZDINX:       # %bb.0:
276; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rup
277; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
278; RV64IZFINXZDINX-NEXT:    seqz a0, a0
279; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
280; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
281; RV64IZFINXZDINX-NEXT:    ret
282  %a = call double @llvm.ceil.f64(double %x)
283  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
284  ret i32 %b
285}
286
; ceil + @llvm.fptosi.sat.i64. RV64: single fcvt.l.d with rup plus NaN mask.
; RV32: calls the ceil libcall then __fixdfdi, saturating manually with
; fle.d/flt.d bound checks against constant-pool values (.LCPI5_0/.LCPI5_1)
; to produce INT64_MIN/INT64_MAX, and feq.d masking NaN input to 0.
287define i64 @test_ceil_si64(double %x) nounwind {
288; RV32IFD-LABEL: test_ceil_si64:
289; RV32IFD:       # %bb.0:
290; RV32IFD-NEXT:    addi sp, sp, -16
291; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
292; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
293; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
294; RV32IFD-NEXT:    call ceil
295; RV32IFD-NEXT:    lui a0, %hi(.LCPI5_0)
296; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_0)(a0)
297; RV32IFD-NEXT:    fmv.d fs0, fa0
298; RV32IFD-NEXT:    fle.d s0, fa5, fa0
299; RV32IFD-NEXT:    call __fixdfdi
300; RV32IFD-NEXT:    lui a3, 524288
301; RV32IFD-NEXT:    lui a2, 524288
302; RV32IFD-NEXT:    beqz s0, .LBB5_2
303; RV32IFD-NEXT:  # %bb.1:
304; RV32IFD-NEXT:    mv a2, a1
305; RV32IFD-NEXT:  .LBB5_2:
306; RV32IFD-NEXT:    lui a1, %hi(.LCPI5_1)
307; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_1)(a1)
308; RV32IFD-NEXT:    flt.d a1, fa5, fs0
309; RV32IFD-NEXT:    beqz a1, .LBB5_4
310; RV32IFD-NEXT:  # %bb.3:
311; RV32IFD-NEXT:    addi a2, a3, -1
312; RV32IFD-NEXT:  .LBB5_4:
313; RV32IFD-NEXT:    feq.d a3, fs0, fs0
314; RV32IFD-NEXT:    neg a4, a1
315; RV32IFD-NEXT:    neg a1, s0
316; RV32IFD-NEXT:    neg a3, a3
317; RV32IFD-NEXT:    and a0, a1, a0
318; RV32IFD-NEXT:    and a1, a3, a2
319; RV32IFD-NEXT:    or a0, a4, a0
320; RV32IFD-NEXT:    and a0, a3, a0
321; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
322; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
323; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
324; RV32IFD-NEXT:    addi sp, sp, 16
325; RV32IFD-NEXT:    ret
326;
327; RV64IFD-LABEL: test_ceil_si64:
328; RV64IFD:       # %bb.0:
329; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
330; RV64IFD-NEXT:    feq.d a1, fa0, fa0
331; RV64IFD-NEXT:    seqz a1, a1
332; RV64IFD-NEXT:    addi a1, a1, -1
333; RV64IFD-NEXT:    and a0, a1, a0
334; RV64IFD-NEXT:    ret
335;
336; RV32IZFINXZDINX-LABEL: test_ceil_si64:
337; RV32IZFINXZDINX:       # %bb.0:
338; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
339; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
340; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
341; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
342; RV32IZFINXZDINX-NEXT:    call ceil
343; RV32IZFINXZDINX-NEXT:    mv s0, a0
344; RV32IZFINXZDINX-NEXT:    mv s1, a1
345; RV32IZFINXZDINX-NEXT:    call __fixdfdi
346; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI5_0)
347; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI5_1)
348; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI5_0)(a2)
349; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI5_0+4)(a2)
350; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI5_1)(a3)
351; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI5_1+4)(a3)
352; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
353; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
354; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
355; RV32IZFINXZDINX-NEXT:    lui a4, 524288
356; RV32IZFINXZDINX-NEXT:    neg a2, a2
357; RV32IZFINXZDINX-NEXT:    neg a5, a6
358; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
359; RV32IZFINXZDINX-NEXT:    neg a5, a3
360; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
361; RV32IZFINXZDINX-NEXT:    lui a5, 524288
362; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB5_2
363; RV32IZFINXZDINX-NEXT:  # %bb.1:
364; RV32IZFINXZDINX-NEXT:    mv a5, a1
365; RV32IZFINXZDINX-NEXT:  .LBB5_2:
366; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
367; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB5_4
368; RV32IZFINXZDINX-NEXT:  # %bb.3:
369; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
370; RV32IZFINXZDINX-NEXT:  .LBB5_4:
371; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
372; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
373; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
374; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
375; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
376; RV32IZFINXZDINX-NEXT:    ret
377;
378; RV64IZFINXZDINX-LABEL: test_ceil_si64:
379; RV64IZFINXZDINX:       # %bb.0:
380; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
381; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
382; RV64IZFINXZDINX-NEXT:    seqz a0, a0
383; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
384; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
385; RV64IZFINXZDINX-NEXT:    ret
386  %a = call double @llvm.ceil.f64(double %x)
387  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
388  ret i64 %b
389}
390
; ceil + @llvm.fptoui.sat.i32: single fcvt.wu.d with static rounding mode
; rup, plus the feq.d-based NaN-to-zero mask.
391define signext i32 @test_ceil_ui32(double %x) {
392; CHECKIFD-LABEL: test_ceil_ui32:
393; CHECKIFD:       # %bb.0:
394; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rup
395; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
396; CHECKIFD-NEXT:    seqz a1, a1
397; CHECKIFD-NEXT:    addi a1, a1, -1
398; CHECKIFD-NEXT:    and a0, a1, a0
399; CHECKIFD-NEXT:    ret
400;
401; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
402; RV32IZFINXZDINX:       # %bb.0:
403; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rup
404; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
405; RV32IZFINXZDINX-NEXT:    seqz a0, a0
406; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
407; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
408; RV32IZFINXZDINX-NEXT:    ret
409;
410; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
411; RV64IZFINXZDINX:       # %bb.0:
412; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rup
413; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
414; RV64IZFINXZDINX-NEXT:    seqz a0, a0
415; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
416; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
417; RV64IZFINXZDINX-NEXT:    ret
418  %a = call double @llvm.ceil.f64(double %x)
419  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
420  ret i32 %b
421}
422
; ceil + @llvm.fptoui.sat.i64. RV64: single fcvt.lu.d with rup plus NaN
; mask. RV32: calls the ceil libcall then __fixunsdfdi; fle.d against +0.0
; masks negative/NaN input to 0, flt.d against the constant-pool upper
; bound (.LCPI7_0) ORs in all-ones for overflow.
423define i64 @test_ceil_ui64(double %x) nounwind {
424; RV32IFD-LABEL: test_ceil_ui64:
425; RV32IFD:       # %bb.0:
426; RV32IFD-NEXT:    addi sp, sp, -16
427; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
428; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
429; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
430; RV32IFD-NEXT:    call ceil
431; RV32IFD-NEXT:    lui a0, %hi(.LCPI7_0)
432; RV32IFD-NEXT:    fld fa5, %lo(.LCPI7_0)(a0)
433; RV32IFD-NEXT:    fcvt.d.w fa4, zero
434; RV32IFD-NEXT:    fle.d a0, fa4, fa0
435; RV32IFD-NEXT:    flt.d a1, fa5, fa0
436; RV32IFD-NEXT:    neg s0, a1
437; RV32IFD-NEXT:    neg s1, a0
438; RV32IFD-NEXT:    call __fixunsdfdi
439; RV32IFD-NEXT:    and a0, s1, a0
440; RV32IFD-NEXT:    and a1, s1, a1
441; RV32IFD-NEXT:    or a0, s0, a0
442; RV32IFD-NEXT:    or a1, s0, a1
443; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
444; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
445; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
446; RV32IFD-NEXT:    addi sp, sp, 16
447; RV32IFD-NEXT:    ret
448;
449; RV64IFD-LABEL: test_ceil_ui64:
450; RV64IFD:       # %bb.0:
451; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
452; RV64IFD-NEXT:    feq.d a1, fa0, fa0
453; RV64IFD-NEXT:    seqz a1, a1
454; RV64IFD-NEXT:    addi a1, a1, -1
455; RV64IFD-NEXT:    and a0, a1, a0
456; RV64IFD-NEXT:    ret
457;
458; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
459; RV32IZFINXZDINX:       # %bb.0:
460; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
461; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
462; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
463; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
464; RV32IZFINXZDINX-NEXT:    call ceil
465; RV32IZFINXZDINX-NEXT:    mv s0, a0
466; RV32IZFINXZDINX-NEXT:    mv s1, a1
467; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
468; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
469; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI7_0)
470; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
471; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI7_0+4)(a4)
472; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI7_0)(a4)
473; RV32IZFINXZDINX-NEXT:    neg a2, a2
474; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
475; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
476; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
477; RV32IZFINXZDINX-NEXT:    neg a2, a2
478; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
479; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
480; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
481; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
482; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
483; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
484; RV32IZFINXZDINX-NEXT:    ret
485;
486; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
487; RV64IZFINXZDINX:       # %bb.0:
488; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rup
489; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
490; RV64IZFINXZDINX-NEXT:    seqz a0, a0
491; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
492; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
493; RV64IZFINXZDINX-NEXT:    ret
494  %a = call double @llvm.ceil.f64(double %x)
495  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
496  ret i64 %b
497}
498
; trunc + @llvm.fptosi.sat.i32: single fcvt.w.d with static rounding mode
; rtz (round-toward-zero), plus the feq.d-based NaN-to-zero mask.
499define signext i32 @test_trunc_si32(double %x) {
500; CHECKIFD-LABEL: test_trunc_si32:
501; CHECKIFD:       # %bb.0:
502; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
503; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
504; CHECKIFD-NEXT:    seqz a1, a1
505; CHECKIFD-NEXT:    addi a1, a1, -1
506; CHECKIFD-NEXT:    and a0, a1, a0
507; CHECKIFD-NEXT:    ret
508;
509; RV32IZFINXZDINX-LABEL: test_trunc_si32:
510; RV32IZFINXZDINX:       # %bb.0:
511; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rtz
512; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
513; RV32IZFINXZDINX-NEXT:    seqz a0, a0
514; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
515; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
516; RV32IZFINXZDINX-NEXT:    ret
517;
518; RV64IZFINXZDINX-LABEL: test_trunc_si32:
519; RV64IZFINXZDINX:       # %bb.0:
520; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rtz
521; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
522; RV64IZFINXZDINX-NEXT:    seqz a0, a0
523; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
524; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
525; RV64IZFINXZDINX-NEXT:    ret
526  %a = call double @llvm.trunc.f64(double %x)
527  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
528  ret i32 %b
529}
530
; trunc + @llvm.fptosi.sat.i64. RV64: single fcvt.l.d with rtz plus NaN
; mask. RV32: calls the trunc libcall then __fixdfdi, saturating manually
; with fle.d/flt.d bound checks against constant-pool values
; (.LCPI9_0/.LCPI9_1) to produce INT64_MIN/INT64_MAX, and feq.d masking
; NaN input to 0.
531define i64 @test_trunc_si64(double %x) nounwind {
532; RV32IFD-LABEL: test_trunc_si64:
533; RV32IFD:       # %bb.0:
534; RV32IFD-NEXT:    addi sp, sp, -16
535; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
536; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
537; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
538; RV32IFD-NEXT:    call trunc
539; RV32IFD-NEXT:    lui a0, %hi(.LCPI9_0)
540; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
541; RV32IFD-NEXT:    fmv.d fs0, fa0
542; RV32IFD-NEXT:    fle.d s0, fa5, fa0
543; RV32IFD-NEXT:    call __fixdfdi
544; RV32IFD-NEXT:    lui a3, 524288
545; RV32IFD-NEXT:    lui a2, 524288
546; RV32IFD-NEXT:    beqz s0, .LBB9_2
547; RV32IFD-NEXT:  # %bb.1:
548; RV32IFD-NEXT:    mv a2, a1
549; RV32IFD-NEXT:  .LBB9_2:
550; RV32IFD-NEXT:    lui a1, %hi(.LCPI9_1)
551; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_1)(a1)
552; RV32IFD-NEXT:    flt.d a1, fa5, fs0
553; RV32IFD-NEXT:    beqz a1, .LBB9_4
554; RV32IFD-NEXT:  # %bb.3:
555; RV32IFD-NEXT:    addi a2, a3, -1
556; RV32IFD-NEXT:  .LBB9_4:
557; RV32IFD-NEXT:    feq.d a3, fs0, fs0
558; RV32IFD-NEXT:    neg a4, a1
559; RV32IFD-NEXT:    neg a1, s0
560; RV32IFD-NEXT:    neg a3, a3
561; RV32IFD-NEXT:    and a0, a1, a0
562; RV32IFD-NEXT:    and a1, a3, a2
563; RV32IFD-NEXT:    or a0, a4, a0
564; RV32IFD-NEXT:    and a0, a3, a0
565; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
566; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
567; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
568; RV32IFD-NEXT:    addi sp, sp, 16
569; RV32IFD-NEXT:    ret
570;
571; RV64IFD-LABEL: test_trunc_si64:
572; RV64IFD:       # %bb.0:
573; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
574; RV64IFD-NEXT:    feq.d a1, fa0, fa0
575; RV64IFD-NEXT:    seqz a1, a1
576; RV64IFD-NEXT:    addi a1, a1, -1
577; RV64IFD-NEXT:    and a0, a1, a0
578; RV64IFD-NEXT:    ret
579;
580; RV32IZFINXZDINX-LABEL: test_trunc_si64:
581; RV32IZFINXZDINX:       # %bb.0:
582; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
583; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
584; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
585; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
586; RV32IZFINXZDINX-NEXT:    call trunc
587; RV32IZFINXZDINX-NEXT:    mv s0, a0
588; RV32IZFINXZDINX-NEXT:    mv s1, a1
589; RV32IZFINXZDINX-NEXT:    call __fixdfdi
590; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI9_0)
591; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI9_1)
592; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI9_0)(a2)
593; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI9_0+4)(a2)
594; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI9_1)(a3)
595; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI9_1+4)(a3)
596; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
597; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
598; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
599; RV32IZFINXZDINX-NEXT:    lui a4, 524288
600; RV32IZFINXZDINX-NEXT:    neg a2, a2
601; RV32IZFINXZDINX-NEXT:    neg a5, a6
602; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
603; RV32IZFINXZDINX-NEXT:    neg a5, a3
604; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
605; RV32IZFINXZDINX-NEXT:    lui a5, 524288
606; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB9_2
607; RV32IZFINXZDINX-NEXT:  # %bb.1:
608; RV32IZFINXZDINX-NEXT:    mv a5, a1
609; RV32IZFINXZDINX-NEXT:  .LBB9_2:
610; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
611; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB9_4
612; RV32IZFINXZDINX-NEXT:  # %bb.3:
613; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
614; RV32IZFINXZDINX-NEXT:  .LBB9_4:
615; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
616; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
617; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
618; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
619; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
620; RV32IZFINXZDINX-NEXT:    ret
621;
622; RV64IZFINXZDINX-LABEL: test_trunc_si64:
623; RV64IZFINXZDINX:       # %bb.0:
624; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
625; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
626; RV64IZFINXZDINX-NEXT:    seqz a0, a0
627; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
628; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
629; RV64IZFINXZDINX-NEXT:    ret
630  %a = call double @llvm.trunc.f64(double %x)
631  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
632  ret i64 %b
633}
634
; trunc + @llvm.fptoui.sat.i32: single fcvt.wu.d with static rounding mode
; rtz, plus the feq.d-based NaN-to-zero mask.
635define signext i32 @test_trunc_ui32(double %x) {
636; CHECKIFD-LABEL: test_trunc_ui32:
637; CHECKIFD:       # %bb.0:
638; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
639; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
640; CHECKIFD-NEXT:    seqz a1, a1
641; CHECKIFD-NEXT:    addi a1, a1, -1
642; CHECKIFD-NEXT:    and a0, a1, a0
643; CHECKIFD-NEXT:    ret
644;
645; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
646; RV32IZFINXZDINX:       # %bb.0:
647; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rtz
648; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
649; RV32IZFINXZDINX-NEXT:    seqz a0, a0
650; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
651; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
652; RV32IZFINXZDINX-NEXT:    ret
653;
654; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
655; RV64IZFINXZDINX:       # %bb.0:
656; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rtz
657; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
658; RV64IZFINXZDINX-NEXT:    seqz a0, a0
659; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
660; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
661; RV64IZFINXZDINX-NEXT:    ret
662  %a = call double @llvm.trunc.f64(double %x)
663  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
664  ret i32 %b
665}
666
; trunc + @llvm.fptoui.sat.i64. RV64: single fcvt.lu.d with rtz plus NaN
; mask. RV32: calls the trunc libcall then __fixunsdfdi; fle.d against +0.0
; masks negative/NaN input to 0, flt.d against the constant-pool upper
; bound (.LCPI11_0) ORs in all-ones for overflow.
667define i64 @test_trunc_ui64(double %x) nounwind {
668; RV32IFD-LABEL: test_trunc_ui64:
669; RV32IFD:       # %bb.0:
670; RV32IFD-NEXT:    addi sp, sp, -16
671; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
672; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
673; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
674; RV32IFD-NEXT:    call trunc
675; RV32IFD-NEXT:    lui a0, %hi(.LCPI11_0)
676; RV32IFD-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
677; RV32IFD-NEXT:    fcvt.d.w fa4, zero
678; RV32IFD-NEXT:    fle.d a0, fa4, fa0
679; RV32IFD-NEXT:    flt.d a1, fa5, fa0
680; RV32IFD-NEXT:    neg s0, a1
681; RV32IFD-NEXT:    neg s1, a0
682; RV32IFD-NEXT:    call __fixunsdfdi
683; RV32IFD-NEXT:    and a0, s1, a0
684; RV32IFD-NEXT:    and a1, s1, a1
685; RV32IFD-NEXT:    or a0, s0, a0
686; RV32IFD-NEXT:    or a1, s0, a1
687; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
688; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
689; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
690; RV32IFD-NEXT:    addi sp, sp, 16
691; RV32IFD-NEXT:    ret
692;
693; RV64IFD-LABEL: test_trunc_ui64:
694; RV64IFD:       # %bb.0:
695; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
696; RV64IFD-NEXT:    feq.d a1, fa0, fa0
697; RV64IFD-NEXT:    seqz a1, a1
698; RV64IFD-NEXT:    addi a1, a1, -1
699; RV64IFD-NEXT:    and a0, a1, a0
700; RV64IFD-NEXT:    ret
701;
702; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
703; RV32IZFINXZDINX:       # %bb.0:
704; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
705; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
706; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
707; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
708; RV32IZFINXZDINX-NEXT:    call trunc
709; RV32IZFINXZDINX-NEXT:    mv s0, a0
710; RV32IZFINXZDINX-NEXT:    mv s1, a1
711; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
712; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
713; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI11_0)
714; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
715; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI11_0+4)(a4)
716; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI11_0)(a4)
717; RV32IZFINXZDINX-NEXT:    neg a2, a2
718; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
719; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
720; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
721; RV32IZFINXZDINX-NEXT:    neg a2, a2
722; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
723; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
724; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
725; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
726; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
727; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
728; RV32IZFINXZDINX-NEXT:    ret
729;
730; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
731; RV64IZFINXZDINX:       # %bb.0:
732; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rtz
733; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
734; RV64IZFINXZDINX-NEXT:    seqz a0, a0
735; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
736; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
737; RV64IZFINXZDINX-NEXT:    ret
738  %a = call double @llvm.trunc.f64(double %x)
739  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
740  ret i64 %b
741}
742
; round + @llvm.fptosi.sat.i32: single fcvt.w.d with static rounding mode
; rmm (round-to-nearest, ties away from zero), plus the feq.d-based
; NaN-to-zero mask.
743define signext i32 @test_round_si32(double %x) {
744; CHECKIFD-LABEL: test_round_si32:
745; CHECKIFD:       # %bb.0:
746; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
747; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
748; CHECKIFD-NEXT:    seqz a1, a1
749; CHECKIFD-NEXT:    addi a1, a1, -1
750; CHECKIFD-NEXT:    and a0, a1, a0
751; CHECKIFD-NEXT:    ret
752;
753; RV32IZFINXZDINX-LABEL: test_round_si32:
754; RV32IZFINXZDINX:       # %bb.0:
755; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rmm
756; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
757; RV32IZFINXZDINX-NEXT:    seqz a0, a0
758; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
759; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
760; RV32IZFINXZDINX-NEXT:    ret
761;
762; RV64IZFINXZDINX-LABEL: test_round_si32:
763; RV64IZFINXZDINX:       # %bb.0:
764; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rmm
765; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
766; RV64IZFINXZDINX-NEXT:    seqz a0, a0
767; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
768; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
769; RV64IZFINXZDINX-NEXT:    ret
770  %a = call double @llvm.round.f64(double %x)
771  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
772  ret i32 %b
773}
774
; llvm.round + fptosi.sat(i64): RV64 folds to a single fcvt.l.d with static
; rounding mode rmm.  RV32 has no native f64->i64 conversion, so it libcalls
; round and __fixdfdi and then clamps manually: fle.d/flt.d against the
; i64 min/max constants (.LCPI13_0/.LCPI13_1) select the saturation bounds
; (lui 524288 builds INT64_MIN's high word), and feq.d masks NaN to 0.
define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a3, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB13_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB13_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI13_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_1)(a1)
; RV32IFD-NEXT:    flt.d a1, fa5, fs0
; RV32IFD-NEXT:    beqz a1, .LBB13_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a3, -1
; RV32IFD-NEXT:  .LBB13_4:
; RV32IFD-NEXT:    feq.d a3, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    neg a1, s0
; RV32IFD-NEXT:    neg a3, a3
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    and a1, a3, a2
; RV32IFD-NEXT:    or a0, a4, a0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI13_0)
; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI13_1)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI13_0)(a2)
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI13_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI13_1)(a3)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI13_1+4)(a3)
; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    neg a5, a6
; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
; RV32IZFINXZDINX-NEXT:    neg a5, a3
; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
; RV32IZFINXZDINX-NEXT:    lui a5, 524288
; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB13_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a5, a1
; RV32IZFINXZDINX-NEXT:  .LBB13_2:
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB13_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB13_4:
; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
878
; llvm.round + fptoui.sat(i32): folds to a single fcvt.wu.d with static
; rounding mode rmm; the feq.d-derived mask zeroes the result for NaN input.
define signext i32 @test_round_ui32(double %x) {
; CHECKIFD-LABEL: test_round_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rmm
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
910
; llvm.round + fptoui.sat(i64): RV64 folds to fcvt.lu.d with rmm.  RV32
; libcalls round and __fixunsdfdi, then saturates: fle.d against +0.0 masks
; negative inputs (and NaN) to 0, and flt.d against the UINT64_MAX constant
; (.LCPI15_0) ORs in all-ones on overflow.
define i64 @test_round_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_round_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI15_0)(a0)
; RV32IFD-NEXT:    fcvt.d.w fa4, zero
; RV32IFD-NEXT:    fle.d a0, fa4, fa0
; RV32IFD-NEXT:    flt.d a1, fa5, fa0
; RV32IFD-NEXT:    neg s0, a1
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI15_0)
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI15_0+4)(a4)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI15_0)(a4)
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
986
; llvm.roundeven + fptosi.sat(i32): folds to a single fcvt.w.d with static
; rounding mode rne (round-to-nearest, ties to even); NaN is masked to 0.
define signext i32 @test_roundeven_si32(double %x) {
; CHECKIFD-LABEL: test_roundeven_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rne
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rne
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
1018
; llvm.roundeven + fptosi.sat(i64): RV64 folds to fcvt.l.d with rne.  RV32
; libcalls roundeven and __fixdfdi, then clamps against the i64 bounds
; loaded from .LCPI17_0/.LCPI17_1 and masks NaN to 0 via feq.d.
define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a3, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB17_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB17_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI17_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_1)(a1)
; RV32IFD-NEXT:    flt.d a1, fa5, fs0
; RV32IFD-NEXT:    beqz a1, .LBB17_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a3, -1
; RV32IFD-NEXT:  .LBB17_4:
; RV32IFD-NEXT:    feq.d a3, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    neg a1, s0
; RV32IFD-NEXT:    neg a3, a3
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    and a1, a3, a2
; RV32IFD-NEXT:    or a0, a4, a0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI17_0)
; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI17_1)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI17_0)(a2)
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI17_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI17_1)(a3)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI17_1+4)(a3)
; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    neg a5, a6
; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
; RV32IZFINXZDINX-NEXT:    neg a5, a3
; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
; RV32IZFINXZDINX-NEXT:    lui a5, 524288
; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB17_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a5, a1
; RV32IZFINXZDINX-NEXT:  .LBB17_2:
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB17_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB17_4:
; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
1122
; llvm.roundeven + fptoui.sat(i32): folds to a single fcvt.wu.d with static
; rounding mode rne; NaN is masked to 0 by the feq.d/seqz sequence.
define signext i32 @test_roundeven_ui32(double %x) {
; CHECKIFD-LABEL: test_roundeven_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rne
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rne
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
1154
; llvm.roundeven + fptoui.sat(i64): RV64 folds to fcvt.lu.d with rne.  RV32
; libcalls roundeven and __fixunsdfdi, masking negative/NaN inputs to 0 via
; fle.d against +0.0 and ORing all-ones when the input exceeds UINT64_MAX
; (constant .LCPI19_0).
define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lui a0, %hi(.LCPI19_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV32IFD-NEXT:    fcvt.d.w fa4, zero
; RV32IFD-NEXT:    fle.d a0, fa4, fa0
; RV32IFD-NEXT:    flt.d a1, fa5, fa0
; RV32IFD-NEXT:    neg s0, a1
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI19_0)
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI19_0+4)(a4)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI19_0)(a4)
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
1230
; llvm.rint + fptosi.sat(i32): unlike floor/round/roundeven, rint uses the
; dynamic rounding mode, so the fcvt.w.d carries no static rounding-mode
; operand; NaN is still masked to 0.
define signext i32 @test_rint_si32(double %x) {
; CHECKIFD-LABEL: test_rint_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
1262
; llvm.rint + fptosi.sat(i64): RV64 folds to fcvt.l.d with no static
; rounding-mode operand (rint uses the dynamic mode).  RV32 libcalls rint
; and __fixdfdi, clamping against the i64 bounds (.LCPI21_0/.LCPI21_1) and
; masking NaN to 0 via feq.d.
define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a3, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB21_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB21_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI21_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI21_1)(a1)
; RV32IFD-NEXT:    flt.d a1, fa5, fs0
; RV32IFD-NEXT:    beqz a1, .LBB21_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a3, -1
; RV32IFD-NEXT:  .LBB21_4:
; RV32IFD-NEXT:    feq.d a3, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    neg a1, s0
; RV32IFD-NEXT:    neg a3, a3
; RV32IFD-NEXT:    and a0, a1, a0
; RV32IFD-NEXT:    and a1, a3, a2
; RV32IFD-NEXT:    or a0, a4, a0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_rint_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call rint
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI21_0)
; RV32IZFINXZDINX-NEXT:    lui a3, %hi(.LCPI21_1)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI21_0)(a2)
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI21_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI21_1)(a3)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI21_1+4)(a3)
; RV32IZFINXZDINX-NEXT:    fle.d a6, a4, s0
; RV32IZFINXZDINX-NEXT:    flt.d a3, a2, s0
; RV32IZFINXZDINX-NEXT:    feq.d a2, s0, s0
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    neg a5, a6
; RV32IZFINXZDINX-NEXT:    and a0, a5, a0
; RV32IZFINXZDINX-NEXT:    neg a5, a3
; RV32IZFINXZDINX-NEXT:    or a0, a5, a0
; RV32IZFINXZDINX-NEXT:    lui a5, 524288
; RV32IZFINXZDINX-NEXT:    beqz a6, .LBB21_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a5, a1
; RV32IZFINXZDINX-NEXT:  .LBB21_2:
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB21_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a5, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB21_4:
; RV32IZFINXZDINX-NEXT:    and a1, a2, a5
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
1366
; llvm.rint + fptoui.sat(i32): fcvt.wu.d with no static rounding-mode
; operand (rint uses the dynamic mode); NaN is masked to 0.
define signext i32 @test_rint_ui32(double %x) {
; CHECKIFD-LABEL: test_rint_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
1398
; llvm.rint + fptoui.sat(i64): RV64 folds to fcvt.lu.d with no static
; rounding-mode operand.  RV32 libcalls rint and __fixunsdfdi, masking
; negative/NaN inputs to 0 via fle.d against +0.0 and ORing all-ones when
; the input exceeds UINT64_MAX (constant .LCPI23_0).
define i64 @test_rint_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lui a0, %hi(.LCPI23_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
; RV32IFD-NEXT:    fcvt.d.w fa4, zero
; RV32IFD-NEXT:    fle.d a0, fa4, fa0
; RV32IFD-NEXT:    flt.d a1, fa5, fa0
; RV32IFD-NEXT:    neg s0, a1
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_rint_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call rint
; RV32IZFINXZDINX-NEXT:    mv s0, a0
; RV32IZFINXZDINX-NEXT:    mv s1, a1
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI23_0)
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI23_0+4)(a4)
; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI23_0)(a4)
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, a2, a1
; RV32IZFINXZDINX-NEXT:    flt.d a2, a4, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
1474
1475declare double @llvm.floor.f64(double)
1476declare double @llvm.ceil.f64(double)
1477declare double @llvm.trunc.f64(double)
1478declare double @llvm.round.f64(double)
1479declare double @llvm.roundeven.f64(double)
1480declare double @llvm.rint.f64(double)
1481declare i32 @llvm.fptosi.sat.i32.f64(double)
1482declare i64 @llvm.fptosi.sat.i64.f64(double)
1483declare i32 @llvm.fptoui.sat.i32.f64(double)
1484declare i64 @llvm.fptoui.sat.i64.f64(double)
1485