; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -global-isel -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

define i32 @fcvt_w_s(float %a) nounwind {
; CHECKIF-LABEL: fcvt_w_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i32 @fcvt_wu_s(float %a) nounwind {
; CHECKIF-LABEL: fcvt_wu_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    bnez a0, .LBB2_2
; CHECKIF-NEXT:  # %bb.1:
; CHECKIF-NEXT:    li a0, 1
; CHECKIF-NEXT:  .LBB2_2:
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    bnez a0, .LBB2_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    bnez a1, .LBB2_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:  .LBB2_2:
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = fptoui float %x to i32
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define signext i32 @fmv_x_w(float %a, float %b) nounwind {
; RV32IF-LABEL: fmv_x_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fadd.s fa5, fa0, fa1
; RV32IF-NEXT:    fmv.x.w a0, fa5
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_x_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fadd.s fa5, fa0, fa1
; RV64IF-NEXT:    fmv.x.w a0, fa5
; RV64IF-NEXT:    sext.w a0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fmv_x_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_x_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.x.w is generated even for a soft float calling convention
  %1 = fadd float %a, %b
  %2 = bitcast float %1 to i32
  ret i32 %2
}

define float @fcvt_s_w(i32 %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_w_load(ptr %p) nounwind {
; CHECKIF-LABEL: fcvt_s_w_load:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    lw a0, 0(a0)
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_wu(i32 %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_wu_load(ptr %p) nounwind {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; CHECKIF-LABEL: fmv_w_x:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x fa5, a0
; CHECKIF-NEXT:    fmv.w.x fa4, a1
; CHECKIF-NEXT:    fadd.s fa0, fa5, fa4
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmv_w_x:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_w_x:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.w.x is generated even for a soft float calling convention
  %1 = bitcast i32 %a to float
  %2 = bitcast i32 %b to float
  %3 = fadd float %1, %2
  ret float %3
}

define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

define float @fcvt_s_l(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}

define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w fa5, a0
; RV32IF-NEXT:    fsw fa5, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w fa5, a0
; RV64IF-NEXT:    fsw fa5, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = sitofp i32 %3 to float
  store float %4, ptr %1, align 4
  ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu fa5, a0
; RV32IF-NEXT:    fsw fa5, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu fa5, a0
; RV64IF-NEXT:    fsw fa5, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = uitofp i32 %3 to float
  store float %4, ptr %1, align 4
  ret i32 %3
}

define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    slli a0, a0, 16
; RV32IF-NEXT:    srai a0, a0, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    slli a0, a0, 48
; RV64IF-NEXT:    srai a0, a0, 48
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i16
  ret i16 %1
}

define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    slli a0, a0, 16
; RV32IF-NEXT:    srli a0, a0, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    slli a0, a0, 48
; RV64IF-NEXT:    srli a0, a0, 48
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    srli a0, a0, 16
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i16
  ret i16 %1
}

define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    slli a0, a0, 24
; RV32IF-NEXT:    srai a0, a0, 24
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    slli a0, a0, 56
; RV64IF-NEXT:    srai a0, a0, 56
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi
; RV32I-NEXT:    slli a0, a0, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i8
  ret i8 %1
}

define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    andi a0, a0, 255
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    andi a0, a0, 255
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i8
  ret i8 %1
}