; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D

define signext i8 @convert_float_to_i8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i8
  ret i8 %1
}

define signext i16 @convert_float_to_i16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i16
  ret i16 %1
}

define i32 @convert_float_to_i32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i64 @convert_float_to_i64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__fixsfdi)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__fixsfdi)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

define zeroext i8 @convert_float_to_u8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i8
  ret i8 %1
}

define zeroext i16 @convert_float_to_u16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i16
  ret i16 %1
}

define i32 @convert_float_to_u32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32F-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI6_0)
; LA32F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA32F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA32F-NEXT:    movfr2gr.s $a0, $fa1
; LA32F-NEXT:    lu12i.w $a1, -524288
; LA32F-NEXT:    xor $a0, $a0, $a1
; LA32F-NEXT:    movcf2gr $a1, $fcc0
; LA32F-NEXT:    masknez $a0, $a0, $a1
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a2, $fa0
; LA32F-NEXT:    maskeqz $a1, $a2, $a1
; LA32F-NEXT:    or $a0, $a1, $a0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI6_0)
; LA32D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32D-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA32D-NEXT:    ftintrz.w.s $fa1, $fa1
; LA32D-NEXT:    movfr2gr.s $a0, $fa1
; LA32D-NEXT:    lu12i.w $a1, -524288
; LA32D-NEXT:    xor $a0, $a0, $a1
; LA32D-NEXT:    movcf2gr $a1, $fcc0
; LA32D-NEXT:    masknez $a0, $a0, $a1
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a2, $fa0
; LA32D-NEXT:    maskeqz $a1, $a2, $a1
; LA32D-NEXT:    or $a0, $a1, $a0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64F-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI6_0)
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA64F-NEXT:    movfr2gr.s $a0, $fa1
; LA64F-NEXT:    lu12i.w $a1, -524288
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

define i64 @convert_float_to_u64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__fixunssfdi)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__fixunssfdi)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA64F-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI7_0)
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA64F-NEXT:    movfr2gr.s $a0, $fa1
; LA64F-NEXT:    lu52i.d $a1, $zero, -2048
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA64D-NEXT:    fld.s $fa1, $a0, %pc_lo12(.LCPI7_0)
; LA64D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64D-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64D-NEXT:    ftintrz.l.s $fa1, $fa1
; LA64D-NEXT:    movfr2gr.d $a0, $fa1
; LA64D-NEXT:    lu52i.d $a1, $zero, -2048
; LA64D-NEXT:    xor $a0, $a0, $a1
; LA64D-NEXT:    movcf2gr $a1, $fcc0
; LA64D-NEXT:    masknez $a0, $a0, $a1
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a2, $fa0
; LA64D-NEXT:    maskeqz $a1, $a2, $a1
; LA64D-NEXT:    or $a0, $a1, $a0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

define float @convert_i8_to_float(i8 signext %a) nounwind {
; LA32F-LABEL: convert_i8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}

define float @convert_i16_to_float(i16 signext %a) nounwind {
; LA32F-LABEL: convert_i16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}

define float @convert_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @convert_i64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_i64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__floatdisf)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__floatdisf)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bl %plt(__floatdisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.d $fa0, $a0
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}

define float @convert_u8_to_float(i8 zeroext %a) nounwind {
; LA32F-LABEL: convert_u8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}

define float @convert_u16_to_float(i16 zeroext %a) nounwind {
; LA32F-LABEL: convert_u16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}

define float @convert_u32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_u32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    srli.w $a1, $a0, 1
; LA32F-NEXT:    andi $a2, $a0, 1
; LA32F-NEXT:    or $a1, $a2, $a1
; LA32F-NEXT:    movgr2fr.w $fa0, $a1
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA32F-NEXT:    slti $a1, $a0, 0
; LA32F-NEXT:    movgr2fr.w $fa1, $a0
; LA32F-NEXT:    ffint.s.w $fa1, $fa1
; LA32F-NEXT:    movgr2cf $fcc0, $a1
; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    lu12i.w $a1, 275200
; LA32D-NEXT:    st.w $a1, $sp, 12
; LA32D-NEXT:    st.w $a0, $sp, 8
; LA32D-NEXT:    fld.d $fa0, $sp, 8
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI14_0)
; LA32D-NEXT:    fld.d $fa1, $a0, %pc_lo12(.LCPI14_0)
; LA32D-NEXT:    fsub.d $fa0, $fa0, $fa1
; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64F-NEXT:    bl %plt(__floatundisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64D-NEXT:    movgr2fr.d $fa0, $a0
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @convert_u64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_u64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__floatundisf)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__floatundisf)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bl %plt(__floatundisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    srli.d $a1, $a0, 1
; LA64D-NEXT:    andi $a2, $a0, 1
; LA64D-NEXT:    or $a1, $a2, $a1
; LA64D-NEXT:    movgr2fr.d $fa0, $a1
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64D-NEXT:    slti $a1, $a0, 0
; LA64D-NEXT:    movgr2fr.d $fa1, $a0
; LA64D-NEXT:    ffint.s.l $fa1, $fa1
; LA64D-NEXT:    movgr2cf $fcc0, $a1
; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64D-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}

define i32 @bitcast_float_to_i32(float %a) nounwind {
; LA32F-LABEL: bitcast_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: bitcast_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: bitcast_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: bitcast_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    ret
  %1 = bitcast float %a to i32
  ret i32 %1
}

define float @bitcast_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: bitcast_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: bitcast_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: bitcast_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: bitcast_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ret
  %1 = bitcast i32 %a to float
  ret float %1
}