; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN:   < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s \
; RUN:   --check-prefixes=CHECK,P8
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s \
; RUN:   --check-prefixes=CHECK,P9
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
; RUN:   FileCheck %s -check-prefix=NOVSX
; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
; RUN:   -stop-after=machine-cp | FileCheck %s -check-prefix=MIR

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)

define i32 @d_to_i32(double %m) #0 {
; CHECK-LABEL: d_to_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_i32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
  ret i32 %conv
}

define i64 @d_to_i64(double %m) #0 {
; CHECK-LABEL: d_to_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_i64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
  ret i64 %conv
}

define i64 @d_to_u64(double %m) #0 {
; CHECK-LABEL: d_to_u64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_u64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
  ret i64 %conv
}

define zeroext i32 @d_to_u32(double %m) #0 {
; CHECK-LABEL: d_to_u32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_u32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
  ret i32 %conv
}

define signext i32 @f_to_i32(float %m) #0 {
; CHECK-LABEL: f_to_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    extsw r3, r3
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_i32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwa r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
  ret i32 %conv
}

define i64 @f_to_i64(float %m) #0 {
; CHECK-LABEL: f_to_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_i64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
  ret i64 %conv
}

define i64 @f_to_u64(float %m) #0 {
; CHECK-LABEL: f_to_u64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_u64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
  ret i64 %conv
}

define zeroext i32 @f_to_u32(float %m) #0 {
; CHECK-LABEL: f_to_u32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_u32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
  ret i32 %conv
}

define double @i32_to_d(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwa f0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    lfiwax f0, 0, r3
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @i64_to_d(i64 %m) #0 {
; CHECK-LABEL: i64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @u32_to_d(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwz f0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    lfiwzx f0, 0, r3
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @u64_to_d(i64 %m) #0 {
; CHECK-LABEL: u64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define float @i32_to_f(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwa f0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    lfiwax f0, 0, r3
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @i64_to_f(i64 %m) #0 {
; CHECK-LABEL: i64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @u32_to_f(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwz f0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    lfiwzx f0, 0, r3
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @u64_to_f(i64 %m) #0 {
; CHECK-LABEL: u64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define void @d_to_i32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_i32_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    stfiwx f0, 0, r4
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_i32_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    stw r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}

define void @d_to_i64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_i64_store:
; P8:       # %bb.0: # %entry
; P8-NEXT:    xscvdpsxds f0, f1
; P8-NEXT:    stxsdx f0, 0, r4
; P8-NEXT:    blr
;
; P9-LABEL: d_to_i64_store:
; P9:       # %bb.0: # %entry
; P9-NEXT:    xscvdpsxds v2, f1
; P9-NEXT:    stxsd v2, 0(r4)
; P9-NEXT:    blr
;
; NOVSX-LABEL: d_to_i64_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    std r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}

define void @d_to_u64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_u64_store:
; P8:       # %bb.0: # %entry
; P8-NEXT:    xscvdpuxds f0, f1
; P8-NEXT:    stxsdx f0, 0, r4
; P8-NEXT:    blr
;
; P9-LABEL: d_to_u64_store:
; P9:       # %bb.0: # %entry
; P9-NEXT:    xscvdpuxds v2, f1
; P9-NEXT:    stxsd v2, 0(r4)
; P9-NEXT:    blr
;
; NOVSX-LABEL: d_to_u64_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    std r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}

define void @d_to_u32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_u32_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    stfiwx f0, 0, r4
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_u32_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    stw r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}

define void @f_to_i32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_i32_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    stfiwx f0, 0, r4
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_i32_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    stw r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}

define void @f_to_i64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_i64_store:
; P8:       # %bb.0: # %entry
; P8-NEXT:    xscvdpsxds f0, f1
; P8-NEXT:    stxsdx f0, 0, r4
; P8-NEXT:    blr
;
; P9-LABEL: f_to_i64_store:
; P9:       # %bb.0: # %entry
; P9-NEXT:    xscvdpsxds v2, f1
; P9-NEXT:    stxsd v2, 0(r4)
; P9-NEXT:    blr
;
; NOVSX-LABEL: f_to_i64_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    std r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}

define void @f_to_u64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_u64_store:
; P8:       # %bb.0: # %entry
; P8-NEXT:    xscvdpuxds f0, f1
; P8-NEXT:    stxsdx f0, 0, r4
; P8-NEXT:    blr
;
; P9-LABEL: f_to_u64_store:
; P9:       # %bb.0: # %entry
; P9-NEXT:    xscvdpuxds v2, f1
; P9-NEXT:    stxsd v2, 0(r4)
; P9-NEXT:    blr
;
; NOVSX-LABEL: f_to_u64_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    std r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}

define void @f_to_u32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_u32_store:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    stfiwx f0, 0, r4
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_u32_store:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    stw r3, 0(r4)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}

define double @load_i32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfiwax f0, 0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_i32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfiwax f0, 0, r3
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @load_i64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_i64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfd f0, 0(r3)
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @load_u32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfiwzx f0, 0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_u32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfiwzx f0, 0, r3
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define double @load_u64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_u64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfd f0, 0(r3)
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}

define float @load_i32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfiwax f0, 0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_i32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfiwax f0, 0, r3
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @load_i64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_i64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfd f0, 0(r3)
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @load_u32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfiwzx f0, 0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_u32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfiwzx f0, 0, r3
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define float @load_u64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: load_u64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    lfd f0, 0(r3)
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}

define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore")
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore")
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore")
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore")
  store volatile i32 %conv1, ptr %addr1, align 4
  store volatile i32 %conv2, ptr %addr1, align 4
  store volatile i64 %conv3, ptr %addr2, align 8
  store volatile i64 %conv4, ptr %addr2, align 8
  ret void
}

define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore")
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore")
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore")
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore")
  store volatile i32 %conv1, ptr %addr1, align 4
  store volatile i32 %conv2, ptr %addr1, align 4
  store volatile i64 %conv3, ptr %addr2, align 8
  store volatile i64 %conv4, ptr %addr2, align 8
  ret void
}

define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  store volatile float  %conv1, ptr  %addr1, align 4
  store volatile float  %conv2, ptr  %addr1, align 4
  store volatile double %conv3, ptr %addr2, align 8
  store volatile double %conv4, ptr %addr2, align 8
  ret void
}

define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  store volatile float  %conv1, ptr  %addr1, align 4
  store volatile float  %conv2, ptr  %addr1, align 4
  store volatile double %conv3, ptr %addr2, align 8
  store volatile double %conv4, ptr %addr2, align 8
  ret void
}

define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_vec
; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
entry:
  %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  ret <2 x double> %conv
}

declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)

attributes #0 = { strictfp }
