; RUN: llvm-as < %s | llvm-dis > %t.orig
; RUN: llvm-as < %s | llvm-c-test --echo > %t.echo
; RUN: diff -w %t.orig %t.echo
;
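; Round-trips this module through llvm-c-test --echo (the LLVM-C API) and
; checks that the result matches the llvm-as/llvm-dis reference output.
;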
source_filename = "/test/Bindings/float_ops.ll"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"


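; Unary fneg and each binary floating-point arithmetic instruction on float.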
define float @float_ops_f32(float %a, float %b) {
  %1 = fneg float %a

  %2 = fadd float %a, %b
  %3 = fsub float %a, %b
  %4 = fmul float %a, %b
  %5 = fdiv float %a, %b
  %6 = frem float %a, %b

  ret float %1
}

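; The same arithmetic instructions on double.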
define double @float_ops_f64(double %a, double %b) {
  %1 = fneg double %a

  %2 = fadd double %a, %b
  %3 = fsub double %a, %b
  %4 = fmul double %a, %b
  %5 = fdiv double %a, %b
  %6 = frem double %a, %b

  ret double %1
}

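; fcmp with the ordered and unordered comparison predicates, plus the
; ord, false, and true predicates, on float.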
define void @float_cmp_f32(float %a, float %b) {
  %1  = fcmp oeq float %a, %b
  %2  = fcmp ogt float %a, %b
  %3  = fcmp olt float %a, %b
  %4  = fcmp ole float %a, %b
  %5  = fcmp one float %a, %b

  %6  = fcmp ueq float %a, %b
  %7  = fcmp ugt float %a, %b
  %8  = fcmp ult float %a, %b
  %9  = fcmp ule float %a, %b
  %10 = fcmp une float %a, %b

  %11 = fcmp ord float %a, %b
  %12 = fcmp false float %a, %b
  %13 = fcmp true float %a, %b

  ret void
}

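; The same fcmp predicates on double.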
define void @float_cmp_f64(double %a, double %b) {
  %1  = fcmp oeq double %a, %b
  %2  = fcmp ogt double %a, %b
  %3  = fcmp olt double %a, %b
  %4  = fcmp ole double %a, %b
  %5  = fcmp one double %a, %b

  %6  = fcmp ueq double %a, %b
  %7  = fcmp ugt double %a, %b
  %8  = fcmp ult double %a, %b
  %9  = fcmp ule double %a, %b
  %10 = fcmp une double %a, %b

  %11 = fcmp ord double %a, %b
  %12 = fcmp false double %a, %b
  %13 = fcmp true double %a, %b

  ret void
}

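; fcmp with individual fast-math flags and flag combinations on float.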
define void @float_cmp_fast_f32(float %a, float %b) {
  %1  = fcmp fast oeq float %a, %b
  %2  = fcmp nsz ogt float %a, %b
  %3  = fcmp nsz nnan olt float %a, %b
  %4  = fcmp contract ole float %a, %b
  %5  = fcmp nnan one float %a, %b

  %6  = fcmp nnan ninf nsz ueq float %a, %b
  %7  = fcmp arcp ugt float %a, %b
  %8  = fcmp fast ult float %a, %b
  %9  = fcmp fast ule float %a, %b
  %10 = fcmp fast une float %a, %b

  %11 = fcmp fast ord float %a, %b
  %12 = fcmp nnan ninf false float %a, %b
  %13 = fcmp nnan ninf true float %a, %b

  ret void
}

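; The same fast-math fcmp combinations on double.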
define void @float_cmp_fast_f64(double %a, double %b) {
  %1  = fcmp fast oeq double %a, %b
  %2  = fcmp nsz ogt double %a, %b
  %3  = fcmp nsz nnan olt double %a, %b
  %4  = fcmp contract ole double %a, %b
  %5  = fcmp nnan one double %a, %b

  %6  = fcmp nnan ninf nsz ueq double %a, %b
  %7  = fcmp arcp ugt double %a, %b
  %8  = fcmp fast ult double %a, %b
  %9  = fcmp fast ule double %a, %b
  %10 = fcmp fast une double %a, %b

  %11 = fcmp fast ord double %a, %b
  %12 = fcmp nnan ninf false double %a, %b
  %13 = fcmp nnan ninf true double %a, %b

  ret void
}

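; Arithmetic with each individual fast-math flag, then with flag
; combinations, on float.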
define float @float_ops_fast_f32(float %a, float %b) {
  %1 = fneg nnan float %a

  %2 = fadd ninf float %a, %b
  %3 = fsub nsz float %a, %b
  %4 = fmul arcp float %a, %b
  %5 = fdiv contract float %a, %b
  %6 = frem afn float %a, %b

  %7 = fadd reassoc float %a, %b
  %8 = fadd reassoc float %7, %b

  %9  = fadd fast float %a, %b
  %10 = fadd nnan nsz float %a, %b
  %11 = frem nnan nsz float %a, %b
  %12 = fdiv nnan nsz arcp float %a, %b
  %13 = fmul nnan nsz ninf contract float %a, %b
  %14 = fmul nnan nsz ninf arcp contract afn reassoc float %a, %b

  ret float %1
}

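; The same fast-math arithmetic combinations on double.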
define double @float_ops_fast_f64(double %a, double %b) {
  %1 = fneg nnan double %a

  %2 = fadd ninf double %a, %b
  %3 = fsub nsz double %a, %b
  %4 = fmul arcp double %a, %b
  %5 = fdiv contract double %a, %b
  %6 = frem afn double %a, %b

  %7 = fadd reassoc double %a, %b
  %8 = fadd reassoc double %7, %b

  %9  = fadd fast double %a, %b
  %10 = fadd nnan nsz double %a, %b
  %11 = frem nnan nsz double %a, %b
  %12 = fdiv nnan nsz arcp double %a, %b
  %13 = fmul nnan nsz ninf contract double %a, %b
  %14 = fmul nnan nsz ninf arcp contract afn reassoc double %a, %b

  ret double %1
}