; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; NOTE: The rounding mode metadata does not affect which instruction is
; selected. The dynamic rounding mode is always used for operations that
; support a rounding mode.

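; A minimal sketch of that note (a hypothetical extra function; no assertions
; have been autogenerated for it): the explicit "round.towardzero" below is
; still expected to select the same instruction as the "round.dynamic" calls
; in the tests that follow, e.g. fcvt.s.w on the F and Zfinx configurations.
define float @fcvt_s_w_towardzero_sketch(i32 %a) nounwind strictfp {
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.towardzero", metadata !"fpexcept.strict")
  ret float %1
}
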
define i32 @fcvt_w_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_w_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_w_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)

define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_wu_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    seqz a1, a0
; CHECKIF-NEXT:    add a0, a0, a1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_wu_s_multiple_use:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; CHECKIZFINX-NEXT:    seqz a1, a0
; CHECKIZFINX-NEXT:    add a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    seqz a1, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    seqz a1, a0
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict")
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define float @fcvt_s_w(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)

define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_load:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    lw a0, 0(a0)
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_load:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    lw a0, 0(a0)
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)

define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_wu_load:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lw a0, 0(a0)
; RV32IZFINX-NEXT:    fcvt.s.wu a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_wu_load:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lwu a0, 0(a0)
; RV64IZFINX-NEXT:    fcvt.s.wu a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_l_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_l_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)

define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_lu_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_lu_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)

define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_l:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __floatdisf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_l:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.s.l a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)

define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_lu:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __floatundisf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_lu:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.s.lu a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)

define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_i8:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)

define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu_i8:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)

define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_i16:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)

define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu_i16:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)

; Make sure we select the W version of addi on RV64.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w fa5, a0
; RV32IF-NEXT:    fsw fa5, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w fa5, a0
; RV64IF-NEXT:    fsw fa5, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_w_demanded_bits:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi a0, a0, 1
; RV32IZFINX-NEXT:    fcvt.s.w a2, a0
; RV32IZFINX-NEXT:    sw a2, 0(a1)
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_w_demanded_bits:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addiw a0, a0, 1
; RV64IZFINX-NEXT:    fcvt.s.w a2, a0
; RV64IZFINX-NEXT:    sw a2, 0(a1)
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store float %4, ptr %1, align 4
  ret i32 %3
}

; Make sure we select the W version of addi on RV64.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu fa5, a0
; RV32IF-NEXT:    fsw fa5, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu fa5, a0
; RV64IF-NEXT:    fsw fa5, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_wu_demanded_bits:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi a0, a0, 1
; RV32IZFINX-NEXT:    fcvt.s.wu a2, a0
; RV32IZFINX-NEXT:    sw a2, 0(a1)
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_wu_demanded_bits:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addiw a0, a0, 1
; RV64IZFINX-NEXT:    fcvt.s.wu a2, a0
; RV64IZFINX-NEXT:    sw a2, 0(a1)
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store float %4, ptr %1, align 4
  ret i32 %3
}