xref: /llvm-project/llvm/test/CodeGen/RISCV/double-convert-strict.ll (revision 2967e5f8007d873a3e9d97870d2461d0827a3976)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
4; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
5; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
6; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
7; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
8; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
9; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
10; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
11; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
12; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
13; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
14; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
15; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
16; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
17; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
18
19; NOTE: The rounding mode metadata does not affect which instruction is
20; selected. Dynamic rounding mode is always used for operations that
21; support rounding mode.
22
23define float @fcvt_s_d(double %a) nounwind strictfp {
; Strict fptrunc f64->f32: single fcvt.s.d with D/Zdinx; __truncdfsf2 libcall in soft-float.
24; CHECKIFD-LABEL: fcvt_s_d:
25; CHECKIFD:       # %bb.0:
26; CHECKIFD-NEXT:    fcvt.s.d fa0, fa0
27; CHECKIFD-NEXT:    ret
28;
29; RV32IZFINXZDINX-LABEL: fcvt_s_d:
30; RV32IZFINXZDINX:       # %bb.0:
31; RV32IZFINXZDINX-NEXT:    fcvt.s.d a0, a0
32; RV32IZFINXZDINX-NEXT:    ret
33;
34; RV64IZFINXZDINX-LABEL: fcvt_s_d:
35; RV64IZFINXZDINX:       # %bb.0:
36; RV64IZFINXZDINX-NEXT:    fcvt.s.d a0, a0
37; RV64IZFINXZDINX-NEXT:    ret
38;
39; RV32I-LABEL: fcvt_s_d:
40; RV32I:       # %bb.0:
41; RV32I-NEXT:    addi sp, sp, -16
42; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
43; RV32I-NEXT:    call __truncdfsf2
44; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
45; RV32I-NEXT:    addi sp, sp, 16
46; RV32I-NEXT:    ret
47;
48; RV64I-LABEL: fcvt_s_d:
49; RV64I:       # %bb.0:
50; RV64I-NEXT:    addi sp, sp, -16
51; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
52; RV64I-NEXT:    call __truncdfsf2
53; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
54; RV64I-NEXT:    addi sp, sp, 16
55; RV64I-NEXT:    ret
56  %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
57  ret float %1
58}
59declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
60
61define double @fcvt_d_s(float %a) nounwind strictfp {
; Strict fpext f32->f64: fcvt.d.s with D/Zdinx; __extendsfdf2 libcall in soft-float.
62; CHECKIFD-LABEL: fcvt_d_s:
63; CHECKIFD:       # %bb.0:
64; CHECKIFD-NEXT:    fcvt.d.s fa0, fa0
65; CHECKIFD-NEXT:    ret
66;
67; RV32IZFINXZDINX-LABEL: fcvt_d_s:
68; RV32IZFINXZDINX:       # %bb.0:
69; RV32IZFINXZDINX-NEXT:    fcvt.d.s a0, a0
70; RV32IZFINXZDINX-NEXT:    ret
71;
72; RV64IZFINXZDINX-LABEL: fcvt_d_s:
73; RV64IZFINXZDINX:       # %bb.0:
74; RV64IZFINXZDINX-NEXT:    fcvt.d.s a0, a0
75; RV64IZFINXZDINX-NEXT:    ret
76;
77; RV32I-LABEL: fcvt_d_s:
78; RV32I:       # %bb.0:
79; RV32I-NEXT:    addi sp, sp, -16
80; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
81; RV32I-NEXT:    call __extendsfdf2
82; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
83; RV32I-NEXT:    addi sp, sp, 16
84; RV32I-NEXT:    ret
85;
86; RV64I-LABEL: fcvt_d_s:
87; RV64I:       # %bb.0:
88; RV64I-NEXT:    addi sp, sp, -16
89; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
90; RV64I-NEXT:    call __extendsfdf2
91; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
92; RV64I-NEXT:    addi sp, sp, 16
93; RV64I-NEXT:    ret
94  %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict")
95  ret double %1
96}
97declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
98
99define i32 @fcvt_w_d(double %a) nounwind strictfp {
; Strict fptosi f64->i32: fcvt.w.d with rtz rounding; __fixdfsi libcall in soft-float.
100; CHECKIFD-LABEL: fcvt_w_d:
101; CHECKIFD:       # %bb.0:
102; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
103; CHECKIFD-NEXT:    ret
104;
105; RV32IZFINXZDINX-LABEL: fcvt_w_d:
106; RV32IZFINXZDINX:       # %bb.0:
107; RV32IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
108; RV32IZFINXZDINX-NEXT:    ret
109;
110; RV64IZFINXZDINX-LABEL: fcvt_w_d:
111; RV64IZFINXZDINX:       # %bb.0:
112; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
113; RV64IZFINXZDINX-NEXT:    ret
114;
115; RV32I-LABEL: fcvt_w_d:
116; RV32I:       # %bb.0:
117; RV32I-NEXT:    addi sp, sp, -16
118; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
119; RV32I-NEXT:    call __fixdfsi
120; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
121; RV32I-NEXT:    addi sp, sp, 16
122; RV32I-NEXT:    ret
123;
124; RV64I-LABEL: fcvt_w_d:
125; RV64I:       # %bb.0:
126; RV64I-NEXT:    addi sp, sp, -16
127; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
128; RV64I-NEXT:    call __fixdfsi
129; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
130; RV64I-NEXT:    addi sp, sp, 16
131; RV64I-NEXT:    ret
132  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
133  ret i32 %1
134}
135declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
136
137; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
138; because fptoui will produce poison if the result doesn't fit into an i32.
139define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; Strict fptoui f64->i32: fcvt.wu.d with rtz rounding; __fixunsdfsi libcall in soft-float.
140; CHECKIFD-LABEL: fcvt_wu_d:
141; CHECKIFD:       # %bb.0:
142; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
143; CHECKIFD-NEXT:    ret
144;
145; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
146; RV32IZFINXZDINX:       # %bb.0:
147; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
148; RV32IZFINXZDINX-NEXT:    ret
149;
150; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
151; RV64IZFINXZDINX:       # %bb.0:
152; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
153; RV64IZFINXZDINX-NEXT:    ret
154;
155; RV32I-LABEL: fcvt_wu_d:
156; RV32I:       # %bb.0:
157; RV32I-NEXT:    addi sp, sp, -16
158; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
159; RV32I-NEXT:    call __fixunsdfsi
160; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
161; RV32I-NEXT:    addi sp, sp, 16
162; RV32I-NEXT:    ret
163;
164; RV64I-LABEL: fcvt_wu_d:
165; RV64I:       # %bb.0:
166; RV64I-NEXT:    addi sp, sp, -16
167; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
168; RV64I-NEXT:    call __fixunsdfsi
169; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
170; RV64I-NEXT:    addi sp, sp, 16
171; RV64I-NEXT:    ret
172  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
173  ret i32 %1
174}
175declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
176
177; Test where the fptoui has multiple uses, one of which causes a sext to be
178; inserted on RV64.
179define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; Conversion result feeds both an icmp and a select; checks the seqz/add select
; lowering and (per the comment above) the sign-extension handling on RV64.
180; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
181; CHECKIFD:       # %bb.0:
182; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
183; CHECKIFD-NEXT:    seqz a1, a0
184; CHECKIFD-NEXT:    add a0, a0, a1
185; CHECKIFD-NEXT:    ret
186;
187; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
188; RV32IZFINXZDINX:       # %bb.0:
189; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
190; RV32IZFINXZDINX-NEXT:    seqz a1, a0
191; RV32IZFINXZDINX-NEXT:    add a0, a0, a1
192; RV32IZFINXZDINX-NEXT:    ret
193;
194; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
195; RV64IZFINXZDINX:       # %bb.0:
196; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
197; RV64IZFINXZDINX-NEXT:    seqz a1, a0
198; RV64IZFINXZDINX-NEXT:    add a0, a0, a1
199; RV64IZFINXZDINX-NEXT:    ret
200;
201; RV32I-LABEL: fcvt_wu_d_multiple_use:
202; RV32I:       # %bb.0:
203; RV32I-NEXT:    addi sp, sp, -16
204; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
205; RV32I-NEXT:    call __fixunsdfsi
206; RV32I-NEXT:    seqz a1, a0
207; RV32I-NEXT:    add a0, a0, a1
208; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
209; RV32I-NEXT:    addi sp, sp, 16
210; RV32I-NEXT:    ret
211;
212; RV64I-LABEL: fcvt_wu_d_multiple_use:
213; RV64I:       # %bb.0:
214; RV64I-NEXT:    addi sp, sp, -16
215; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
216; RV64I-NEXT:    call __fixunsdfsi
217; RV64I-NEXT:    seqz a1, a0
218; RV64I-NEXT:    add a0, a0, a1
219; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
220; RV64I-NEXT:    addi sp, sp, 16
221; RV64I-NEXT:    ret
222  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict")
223  %b = icmp eq i32 %a, 0
224  %c = select i1 %b, i32 1, i32 %a
225  ret i32 %c
226}
227
228define double @fcvt_d_w(i32 %a) nounwind strictfp {
; Strict sitofp i32->f64: fcvt.d.w; soft-float RV64I must sext.w before __floatsidf.
229; CHECKIFD-LABEL: fcvt_d_w:
230; CHECKIFD:       # %bb.0:
231; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
232; CHECKIFD-NEXT:    ret
233;
234; RV32IZFINXZDINX-LABEL: fcvt_d_w:
235; RV32IZFINXZDINX:       # %bb.0:
236; RV32IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
237; RV32IZFINXZDINX-NEXT:    ret
238;
239; RV64IZFINXZDINX-LABEL: fcvt_d_w:
240; RV64IZFINXZDINX:       # %bb.0:
241; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
242; RV64IZFINXZDINX-NEXT:    ret
243;
244; RV32I-LABEL: fcvt_d_w:
245; RV32I:       # %bb.0:
246; RV32I-NEXT:    addi sp, sp, -16
247; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
248; RV32I-NEXT:    call __floatsidf
249; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
250; RV32I-NEXT:    addi sp, sp, 16
251; RV32I-NEXT:    ret
252;
253; RV64I-LABEL: fcvt_d_w:
254; RV64I:       # %bb.0:
255; RV64I-NEXT:    addi sp, sp, -16
256; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
257; RV64I-NEXT:    sext.w a0, a0
258; RV64I-NEXT:    call __floatsidf
259; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
260; RV64I-NEXT:    addi sp, sp, 16
261; RV64I-NEXT:    ret
262  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
263  ret double %1
264}
265declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
266
267define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; sitofp of a loaded i32: lw feeds fcvt.d.w directly; no extra extension needed.
268; CHECKIFD-LABEL: fcvt_d_w_load:
269; CHECKIFD:       # %bb.0:
270; CHECKIFD-NEXT:    lw a0, 0(a0)
271; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
272; CHECKIFD-NEXT:    ret
273;
274; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
275; RV32IZFINXZDINX:       # %bb.0:
276; RV32IZFINXZDINX-NEXT:    lw a0, 0(a0)
277; RV32IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
278; RV32IZFINXZDINX-NEXT:    ret
279;
280; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
281; RV64IZFINXZDINX:       # %bb.0:
282; RV64IZFINXZDINX-NEXT:    lw a0, 0(a0)
283; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
284; RV64IZFINXZDINX-NEXT:    ret
285;
286; RV32I-LABEL: fcvt_d_w_load:
287; RV32I:       # %bb.0:
288; RV32I-NEXT:    addi sp, sp, -16
289; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
290; RV32I-NEXT:    lw a0, 0(a0)
291; RV32I-NEXT:    call __floatsidf
292; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
293; RV32I-NEXT:    addi sp, sp, 16
294; RV32I-NEXT:    ret
295;
296; RV64I-LABEL: fcvt_d_w_load:
297; RV64I:       # %bb.0:
298; RV64I-NEXT:    addi sp, sp, -16
299; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
300; RV64I-NEXT:    lw a0, 0(a0)
301; RV64I-NEXT:    call __floatsidf
302; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
303; RV64I-NEXT:    addi sp, sp, 16
304; RV64I-NEXT:    ret
305  %a = load i32, ptr %p
306  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
307  ret double %1
308}
309
310define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; Strict uitofp i32->f64: fcvt.d.wu; soft-float RV64I sign-extends before __floatunsidf.
311; CHECKIFD-LABEL: fcvt_d_wu:
312; CHECKIFD:       # %bb.0:
313; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
314; CHECKIFD-NEXT:    ret
315;
316; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
317; RV32IZFINXZDINX:       # %bb.0:
318; RV32IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
319; RV32IZFINXZDINX-NEXT:    ret
320;
321; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
322; RV64IZFINXZDINX:       # %bb.0:
323; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
324; RV64IZFINXZDINX-NEXT:    ret
325;
326; RV32I-LABEL: fcvt_d_wu:
327; RV32I:       # %bb.0:
328; RV32I-NEXT:    addi sp, sp, -16
329; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
330; RV32I-NEXT:    call __floatunsidf
331; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
332; RV32I-NEXT:    addi sp, sp, 16
333; RV32I-NEXT:    ret
334;
335; RV64I-LABEL: fcvt_d_wu:
336; RV64I:       # %bb.0:
337; RV64I-NEXT:    addi sp, sp, -16
338; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
339; RV64I-NEXT:    sext.w a0, a0
340; RV64I-NEXT:    call __floatunsidf
341; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
342; RV64I-NEXT:    addi sp, sp, 16
343; RV64I-NEXT:    ret
344  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
345  ret double %1
346}
347declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
348
349define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; uitofp of a loaded i32: RV64 variants use lwu (zero-extending load) before fcvt.d.wu,
; hence the per-XLEN RV32IFD/RV64IFD prefixes instead of the common CHECKIFD.
350; RV32IFD-LABEL: fcvt_d_wu_load:
351; RV32IFD:       # %bb.0:
352; RV32IFD-NEXT:    lw a0, 0(a0)
353; RV32IFD-NEXT:    fcvt.d.wu fa0, a0
354; RV32IFD-NEXT:    ret
355;
356; RV64IFD-LABEL: fcvt_d_wu_load:
357; RV64IFD:       # %bb.0:
358; RV64IFD-NEXT:    lwu a0, 0(a0)
359; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
360; RV64IFD-NEXT:    ret
361;
362; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
363; RV32IZFINXZDINX:       # %bb.0:
364; RV32IZFINXZDINX-NEXT:    lw a0, 0(a0)
365; RV32IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
366; RV32IZFINXZDINX-NEXT:    ret
367;
368; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
369; RV64IZFINXZDINX:       # %bb.0:
370; RV64IZFINXZDINX-NEXT:    lwu a0, 0(a0)
371; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
372; RV64IZFINXZDINX-NEXT:    ret
373;
374; RV32I-LABEL: fcvt_d_wu_load:
375; RV32I:       # %bb.0:
376; RV32I-NEXT:    addi sp, sp, -16
377; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
378; RV32I-NEXT:    lw a0, 0(a0)
379; RV32I-NEXT:    call __floatunsidf
380; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
381; RV32I-NEXT:    addi sp, sp, 16
382; RV32I-NEXT:    ret
383;
384; RV64I-LABEL: fcvt_d_wu_load:
385; RV64I:       # %bb.0:
386; RV64I-NEXT:    addi sp, sp, -16
387; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
388; RV64I-NEXT:    lw a0, 0(a0)
389; RV64I-NEXT:    call __floatunsidf
390; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
391; RV64I-NEXT:    addi sp, sp, 16
392; RV64I-NEXT:    ret
393  %a = load i32, ptr %p
394  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
395  ret double %1
396}
397
398define i64 @fcvt_l_d(double %a) nounwind strictfp {
; Strict fptosi f64->i64: single fcvt.l.d on RV64; i64 is illegal on RV32, so all
; RV32 configurations fall back to the __fixdfdi libcall.
399; RV32IFD-LABEL: fcvt_l_d:
400; RV32IFD:       # %bb.0:
401; RV32IFD-NEXT:    addi sp, sp, -16
402; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
403; RV32IFD-NEXT:    call __fixdfdi
404; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
405; RV32IFD-NEXT:    addi sp, sp, 16
406; RV32IFD-NEXT:    ret
407;
408; RV64IFD-LABEL: fcvt_l_d:
409; RV64IFD:       # %bb.0:
410; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
411; RV64IFD-NEXT:    ret
412;
413; RV32IZFINXZDINX-LABEL: fcvt_l_d:
414; RV32IZFINXZDINX:       # %bb.0:
415; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
416; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
417; RV32IZFINXZDINX-NEXT:    call __fixdfdi
418; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
419; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
420; RV32IZFINXZDINX-NEXT:    ret
421;
422; RV64IZFINXZDINX-LABEL: fcvt_l_d:
423; RV64IZFINXZDINX:       # %bb.0:
424; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
425; RV64IZFINXZDINX-NEXT:    ret
426;
427; RV32I-LABEL: fcvt_l_d:
428; RV32I:       # %bb.0:
429; RV32I-NEXT:    addi sp, sp, -16
430; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
431; RV32I-NEXT:    call __fixdfdi
432; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
433; RV32I-NEXT:    addi sp, sp, 16
434; RV32I-NEXT:    ret
435;
436; RV64I-LABEL: fcvt_l_d:
437; RV64I:       # %bb.0:
438; RV64I-NEXT:    addi sp, sp, -16
439; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
440; RV64I-NEXT:    call __fixdfdi
441; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
442; RV64I-NEXT:    addi sp, sp, 16
443; RV64I-NEXT:    ret
444  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict")
445  ret i64 %1
446}
447declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
448
449define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; Strict fptoui f64->i64: fcvt.lu.d on RV64; __fixunsdfdi libcall on all RV32 configs.
450; RV32IFD-LABEL: fcvt_lu_d:
451; RV32IFD:       # %bb.0:
452; RV32IFD-NEXT:    addi sp, sp, -16
453; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
454; RV32IFD-NEXT:    call __fixunsdfdi
455; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
456; RV32IFD-NEXT:    addi sp, sp, 16
457; RV32IFD-NEXT:    ret
458;
459; RV64IFD-LABEL: fcvt_lu_d:
460; RV64IFD:       # %bb.0:
461; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
462; RV64IFD-NEXT:    ret
463;
464; RV32IZFINXZDINX-LABEL: fcvt_lu_d:
465; RV32IZFINXZDINX:       # %bb.0:
466; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
467; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
468; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
469; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
470; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
471; RV32IZFINXZDINX-NEXT:    ret
472;
473; RV64IZFINXZDINX-LABEL: fcvt_lu_d:
474; RV64IZFINXZDINX:       # %bb.0:
475; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
476; RV64IZFINXZDINX-NEXT:    ret
477;
478; RV32I-LABEL: fcvt_lu_d:
479; RV32I:       # %bb.0:
480; RV32I-NEXT:    addi sp, sp, -16
481; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
482; RV32I-NEXT:    call __fixunsdfdi
483; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
484; RV32I-NEXT:    addi sp, sp, 16
485; RV32I-NEXT:    ret
486;
487; RV64I-LABEL: fcvt_lu_d:
488; RV64I:       # %bb.0:
489; RV64I-NEXT:    addi sp, sp, -16
490; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
491; RV64I-NEXT:    call __fixunsdfdi
492; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
493; RV64I-NEXT:    addi sp, sp, 16
494; RV64I-NEXT:    ret
495  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
496  ret i64 %1
497}
498declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
499
500define double @fcvt_d_l(i64 %a) nounwind strictfp {
; Strict sitofp i64->f64: fcvt.d.l on RV64; __floatdidf libcall on all RV32 configs.
501; RV32IFD-LABEL: fcvt_d_l:
502; RV32IFD:       # %bb.0:
503; RV32IFD-NEXT:    addi sp, sp, -16
504; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
505; RV32IFD-NEXT:    call __floatdidf
506; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
507; RV32IFD-NEXT:    addi sp, sp, 16
508; RV32IFD-NEXT:    ret
509;
510; RV64IFD-LABEL: fcvt_d_l:
511; RV64IFD:       # %bb.0:
512; RV64IFD-NEXT:    fcvt.d.l fa0, a0
513; RV64IFD-NEXT:    ret
514;
515; RV32IZFINXZDINX-LABEL: fcvt_d_l:
516; RV32IZFINXZDINX:       # %bb.0:
517; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
518; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
519; RV32IZFINXZDINX-NEXT:    call __floatdidf
520; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
521; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
522; RV32IZFINXZDINX-NEXT:    ret
523;
524; RV64IZFINXZDINX-LABEL: fcvt_d_l:
525; RV64IZFINXZDINX:       # %bb.0:
526; RV64IZFINXZDINX-NEXT:    fcvt.d.l a0, a0
527; RV64IZFINXZDINX-NEXT:    ret
528;
529; RV32I-LABEL: fcvt_d_l:
530; RV32I:       # %bb.0:
531; RV32I-NEXT:    addi sp, sp, -16
532; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
533; RV32I-NEXT:    call __floatdidf
534; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
535; RV32I-NEXT:    addi sp, sp, 16
536; RV32I-NEXT:    ret
537;
538; RV64I-LABEL: fcvt_d_l:
539; RV64I:       # %bb.0:
540; RV64I-NEXT:    addi sp, sp, -16
541; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
542; RV64I-NEXT:    call __floatdidf
543; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
544; RV64I-NEXT:    addi sp, sp, 16
545; RV64I-NEXT:    ret
546  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
547  ret double %1
548}
549declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
550
551define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; Strict uitofp i64->f64: fcvt.d.lu on RV64; __floatundidf libcall on all RV32 configs.
552; RV32IFD-LABEL: fcvt_d_lu:
553; RV32IFD:       # %bb.0:
554; RV32IFD-NEXT:    addi sp, sp, -16
555; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
556; RV32IFD-NEXT:    call __floatundidf
557; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
558; RV32IFD-NEXT:    addi sp, sp, 16
559; RV32IFD-NEXT:    ret
560;
561; RV64IFD-LABEL: fcvt_d_lu:
562; RV64IFD:       # %bb.0:
563; RV64IFD-NEXT:    fcvt.d.lu fa0, a0
564; RV64IFD-NEXT:    ret
565;
566; RV32IZFINXZDINX-LABEL: fcvt_d_lu:
567; RV32IZFINXZDINX:       # %bb.0:
568; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
569; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
570; RV32IZFINXZDINX-NEXT:    call __floatundidf
571; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
572; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
573; RV32IZFINXZDINX-NEXT:    ret
574;
575; RV64IZFINXZDINX-LABEL: fcvt_d_lu:
576; RV64IZFINXZDINX:       # %bb.0:
577; RV64IZFINXZDINX-NEXT:    fcvt.d.lu a0, a0
578; RV64IZFINXZDINX-NEXT:    ret
579;
580; RV32I-LABEL: fcvt_d_lu:
581; RV32I:       # %bb.0:
582; RV32I-NEXT:    addi sp, sp, -16
583; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
584; RV32I-NEXT:    call __floatundidf
585; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
586; RV32I-NEXT:    addi sp, sp, 16
587; RV32I-NEXT:    ret
588;
589; RV64I-LABEL: fcvt_d_lu:
590; RV64I:       # %bb.0:
591; RV64I-NEXT:    addi sp, sp, -16
592; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
593; RV64I-NEXT:    call __floatundidf
594; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
595; RV64I-NEXT:    addi sp, sp, 16
596; RV64I-NEXT:    ret
597  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
598  ret double %1
599}
600declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
601
602define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; sitofp i8->f64 with signext arg: no extra extension emitted; reuses fcvt.d.w/__floatsidf.
603; CHECKIFD-LABEL: fcvt_d_w_i8:
604; CHECKIFD:       # %bb.0:
605; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
606; CHECKIFD-NEXT:    ret
607;
608; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
609; RV32IZFINXZDINX:       # %bb.0:
610; RV32IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
611; RV32IZFINXZDINX-NEXT:    ret
612;
613; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
614; RV64IZFINXZDINX:       # %bb.0:
615; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
616; RV64IZFINXZDINX-NEXT:    ret
617;
618; RV32I-LABEL: fcvt_d_w_i8:
619; RV32I:       # %bb.0:
620; RV32I-NEXT:    addi sp, sp, -16
621; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
622; RV32I-NEXT:    call __floatsidf
623; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
624; RV32I-NEXT:    addi sp, sp, 16
625; RV32I-NEXT:    ret
626;
627; RV64I-LABEL: fcvt_d_w_i8:
628; RV64I:       # %bb.0:
629; RV64I-NEXT:    addi sp, sp, -16
630; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
631; RV64I-NEXT:    call __floatsidf
632; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
633; RV64I-NEXT:    addi sp, sp, 16
634; RV64I-NEXT:    ret
635  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
636  ret double %1
637}
638declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
639
640define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; uitofp i8->f64 with zeroext arg: no extra extension emitted; reuses fcvt.d.wu/__floatunsidf.
641; CHECKIFD-LABEL: fcvt_d_wu_i8:
642; CHECKIFD:       # %bb.0:
643; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
644; CHECKIFD-NEXT:    ret
645;
646; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
647; RV32IZFINXZDINX:       # %bb.0:
648; RV32IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
649; RV32IZFINXZDINX-NEXT:    ret
650;
651; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
652; RV64IZFINXZDINX:       # %bb.0:
653; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
654; RV64IZFINXZDINX-NEXT:    ret
655;
656; RV32I-LABEL: fcvt_d_wu_i8:
657; RV32I:       # %bb.0:
658; RV32I-NEXT:    addi sp, sp, -16
659; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
660; RV32I-NEXT:    call __floatunsidf
661; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
662; RV32I-NEXT:    addi sp, sp, 16
663; RV32I-NEXT:    ret
664;
665; RV64I-LABEL: fcvt_d_wu_i8:
666; RV64I:       # %bb.0:
667; RV64I-NEXT:    addi sp, sp, -16
668; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
669; RV64I-NEXT:    call __floatunsidf
670; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
671; RV64I-NEXT:    addi sp, sp, 16
672; RV64I-NEXT:    ret
673  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
674  ret double %1
675}
676declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
677
678define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; sitofp i16->f64 with signext arg: no extra extension emitted; reuses fcvt.d.w/__floatsidf.
679; CHECKIFD-LABEL: fcvt_d_w_i16:
680; CHECKIFD:       # %bb.0:
681; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
682; CHECKIFD-NEXT:    ret
683;
684; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
685; RV32IZFINXZDINX:       # %bb.0:
686; RV32IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
687; RV32IZFINXZDINX-NEXT:    ret
688;
689; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
690; RV64IZFINXZDINX:       # %bb.0:
691; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
692; RV64IZFINXZDINX-NEXT:    ret
693;
694; RV32I-LABEL: fcvt_d_w_i16:
695; RV32I:       # %bb.0:
696; RV32I-NEXT:    addi sp, sp, -16
697; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
698; RV32I-NEXT:    call __floatsidf
699; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
700; RV32I-NEXT:    addi sp, sp, 16
701; RV32I-NEXT:    ret
702;
703; RV64I-LABEL: fcvt_d_w_i16:
704; RV64I:       # %bb.0:
705; RV64I-NEXT:    addi sp, sp, -16
706; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
707; RV64I-NEXT:    call __floatsidf
708; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
709; RV64I-NEXT:    addi sp, sp, 16
710; RV64I-NEXT:    ret
711  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
712  ret double %1
713}
714declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
715
716define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; uitofp i16->f64 with zeroext arg: no extra extension emitted; reuses fcvt.d.wu/__floatunsidf.
717; CHECKIFD-LABEL: fcvt_d_wu_i16:
718; CHECKIFD:       # %bb.0:
719; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
720; CHECKIFD-NEXT:    ret
721;
722; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
723; RV32IZFINXZDINX:       # %bb.0:
724; RV32IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
725; RV32IZFINXZDINX-NEXT:    ret
726;
727; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
728; RV64IZFINXZDINX:       # %bb.0:
729; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
730; RV64IZFINXZDINX-NEXT:    ret
731;
732; RV32I-LABEL: fcvt_d_wu_i16:
733; RV32I:       # %bb.0:
734; RV32I-NEXT:    addi sp, sp, -16
735; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
736; RV32I-NEXT:    call __floatunsidf
737; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
738; RV32I-NEXT:    addi sp, sp, 16
739; RV32I-NEXT:    ret
740;
741; RV64I-LABEL: fcvt_d_wu_i16:
742; RV64I:       # %bb.0:
743; RV64I-NEXT:    addi sp, sp, -16
744; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
745; RV64I-NEXT:    call __floatunsidf
746; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
747; RV64I-NEXT:    addi sp, sp, 16
748; RV64I-NEXT:    ret
749  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
750  ret double %1
751}
752declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
753
754; Make sure we select W version of addi on RV64.
755define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; The incremented value is both converted (sitofp) and returned, so on RV64 the
; add must be addiw to keep the returned i32 properly sign-extended.
756; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
757; RV32IFD:       # %bb.0:
758; RV32IFD-NEXT:    addi a0, a0, 1
759; RV32IFD-NEXT:    fcvt.d.w fa5, a0
760; RV32IFD-NEXT:    fsd fa5, 0(a1)
761; RV32IFD-NEXT:    ret
762;
763; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
764; RV64IFD:       # %bb.0:
765; RV64IFD-NEXT:    addiw a0, a0, 1
766; RV64IFD-NEXT:    fcvt.d.w fa5, a0
767; RV64IFD-NEXT:    fsd fa5, 0(a1)
768; RV64IFD-NEXT:    ret
769;
770; RV32IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
771; RV32IZFINXZDINX:       # %bb.0:
772; RV32IZFINXZDINX-NEXT:    addi a0, a0, 1
773; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, a0
774; RV32IZFINXZDINX-NEXT:    sw a2, 0(a1)
775; RV32IZFINXZDINX-NEXT:    sw a3, 4(a1)
776; RV32IZFINXZDINX-NEXT:    ret
777;
778; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
779; RV64IZFINXZDINX:       # %bb.0:
780; RV64IZFINXZDINX-NEXT:    addiw a0, a0, 1
781; RV64IZFINXZDINX-NEXT:    fcvt.d.w a2, a0
782; RV64IZFINXZDINX-NEXT:    sd a2, 0(a1)
783; RV64IZFINXZDINX-NEXT:    ret
784;
785; RV32I-LABEL: fcvt_d_w_demanded_bits:
786; RV32I:       # %bb.0:
787; RV32I-NEXT:    addi sp, sp, -16
788; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
789; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
790; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
791; RV32I-NEXT:    mv s0, a1
792; RV32I-NEXT:    addi s1, a0, 1
793; RV32I-NEXT:    mv a0, s1
794; RV32I-NEXT:    call __floatsidf
795; RV32I-NEXT:    sw a0, 0(s0)
796; RV32I-NEXT:    sw a1, 4(s0)
797; RV32I-NEXT:    mv a0, s1
798; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
799; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
800; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
801; RV32I-NEXT:    addi sp, sp, 16
802; RV32I-NEXT:    ret
803;
804; RV64I-LABEL: fcvt_d_w_demanded_bits:
805; RV64I:       # %bb.0:
806; RV64I-NEXT:    addi sp, sp, -32
807; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
808; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
809; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
810; RV64I-NEXT:    mv s0, a1
811; RV64I-NEXT:    addiw s1, a0, 1
812; RV64I-NEXT:    mv a0, s1
813; RV64I-NEXT:    call __floatsidf
814; RV64I-NEXT:    sd a0, 0(s0)
815; RV64I-NEXT:    mv a0, s1
816; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
817; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
818; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
819; RV64I-NEXT:    addi sp, sp, 32
820; RV64I-NEXT:    ret
821  %3 = add i32 %0, 1
822  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
823  store double %4, ptr %1, align 8
824  ret i32 %3
825}
826
827; Make sure we select W version of addi on RV64.
828define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; Unsigned counterpart of fcvt_d_w_demanded_bits: incremented value feeds uitofp
; and the return, so RV64 must use addiw to keep the i32 return sign-extended.
829; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
830; RV32IFD:       # %bb.0:
831; RV32IFD-NEXT:    addi a0, a0, 1
832; RV32IFD-NEXT:    fcvt.d.wu fa5, a0
833; RV32IFD-NEXT:    fsd fa5, 0(a1)
834; RV32IFD-NEXT:    ret
835;
836; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
837; RV64IFD:       # %bb.0:
838; RV64IFD-NEXT:    addiw a0, a0, 1
839; RV64IFD-NEXT:    fcvt.d.wu fa5, a0
840; RV64IFD-NEXT:    fsd fa5, 0(a1)
841; RV64IFD-NEXT:    ret
842;
843; RV32IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
844; RV32IZFINXZDINX:       # %bb.0:
845; RV32IZFINXZDINX-NEXT:    addi a0, a0, 1
846; RV32IZFINXZDINX-NEXT:    fcvt.d.wu a2, a0
847; RV32IZFINXZDINX-NEXT:    sw a2, 0(a1)
848; RV32IZFINXZDINX-NEXT:    sw a3, 4(a1)
849; RV32IZFINXZDINX-NEXT:    ret
850;
851; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
852; RV64IZFINXZDINX:       # %bb.0:
853; RV64IZFINXZDINX-NEXT:    addiw a0, a0, 1
854; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a2, a0
855; RV64IZFINXZDINX-NEXT:    sd a2, 0(a1)
856; RV64IZFINXZDINX-NEXT:    ret
857;
858; RV32I-LABEL: fcvt_d_wu_demanded_bits:
859; RV32I:       # %bb.0:
860; RV32I-NEXT:    addi sp, sp, -16
861; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
862; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
863; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
864; RV32I-NEXT:    mv s0, a1
865; RV32I-NEXT:    addi s1, a0, 1
866; RV32I-NEXT:    mv a0, s1
867; RV32I-NEXT:    call __floatunsidf
868; RV32I-NEXT:    sw a0, 0(s0)
869; RV32I-NEXT:    sw a1, 4(s0)
870; RV32I-NEXT:    mv a0, s1
871; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
872; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
873; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
874; RV32I-NEXT:    addi sp, sp, 16
875; RV32I-NEXT:    ret
876;
877; RV64I-LABEL: fcvt_d_wu_demanded_bits:
878; RV64I:       # %bb.0:
879; RV64I-NEXT:    addi sp, sp, -32
880; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
881; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
882; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
883; RV64I-NEXT:    mv s0, a1
884; RV64I-NEXT:    addiw s1, a0, 1
885; RV64I-NEXT:    mv a0, s1
886; RV64I-NEXT:    call __floatunsidf
887; RV64I-NEXT:    sd a0, 0(s0)
888; RV64I-NEXT:    mv a0, s1
889; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
890; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
891; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
892; RV64I-NEXT:    addi sp, sp, 32
893; RV64I-NEXT:    ret
894  %3 = add i32 %0, 1
895  %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
896  store double %4, ptr %1, align 8
897  ret i32 %3
898}
899