; xref: /llvm-project/llvm/test/CodeGen/PowerPC/fp-strict-conv-spe.ll (revision c42f0a6e6476971974cb3f52c1138dbd8f9cca1f)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mcpu=e500 \
; RUN:   -mtriple=powerpc-unknown-linux-gnu -mattr=spe | FileCheck %s \
; RUN:   -check-prefix=SPE

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
; Strict f64 -> signed i32: selects a single efdctsiz, no libcall.
define i32 @d_to_i32(double %m) #0 {
; SPE-LABEL: d_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

; Strict f64 -> signed i64: no SPE instruction; lowered to the __fixdfdi libcall.
define i64 @d_to_i64(double %m) #0 {
; SPE-LABEL: d_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    bl __fixdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

; Strict f64 -> unsigned i64: no SPE instruction; lowered to the __fixunsdfdi libcall.
define i64 @d_to_u64(double %m) #0 {
; SPE-LABEL: d_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    bl __fixunsdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

; Strict f64 -> unsigned i32: selects a single efdctuiz, no libcall.
define zeroext i32 @d_to_u32(double %m) #0 {
; SPE-LABEL: d_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

; Strict f32 -> signed i32: selects a single efsctsiz.
define signext i32 @f_to_i32(float %m) #0 {
; SPE-LABEL: f_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

; Strict f32 -> signed i64: lowered to the __fixsfdi libcall.
define i64 @f_to_i64(float %m) #0 {
; SPE-LABEL: f_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixsfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

; Strict f32 -> unsigned i64: lowered to the __fixunssfdi libcall.
define i64 @f_to_u64(float %m) #0 {
; SPE-LABEL: f_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixunssfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

; Strict f32 -> unsigned i32: selects a single efsctuiz.
define zeroext i32 @f_to_u32(float %m) #0 {
; SPE-LABEL: f_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

; Strict signed i32 -> f64: selects efdcfsi.
define double @i32_to_d(i32 signext %m) #0 {
; SPE-LABEL: i32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfsi r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

; Strict signed i64 -> f64: lowered to the __floatdidf libcall.
define double @i64_to_d(i64 %m) #0 {
; SPE-LABEL: i64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

; Strict unsigned i32 -> f64: selects efdcfui.
define double @u32_to_d(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfui r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

; Strict unsigned i64 -> f64: lowered to the __floatundidf libcall.
define double @u64_to_d(i64 %m) #0 {
; SPE-LABEL: u64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

; Strict signed i32 -> f32: selects efscfsi.
define float @i32_to_f(i32 signext %m) #0 {
; SPE-LABEL: i32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfsi r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

; Strict signed i64 -> f32: lowered to the __floatdisf libcall.
define float @i64_to_f(i64 %m) #0 {
; SPE-LABEL: i64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

; Strict unsigned i32 -> f32: selects efscfui.
define float @u32_to_f(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfui r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

; Strict unsigned i64 -> f32: lowered to the __floatundisf libcall.
define float @u64_to_f(i64 %m) #0 {
; SPE-LABEL: u64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

attributes #0 = { strictfp }