; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse  | FileCheck %s --check-prefixes=X86-SSE,X86-SSE1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE,X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE2

; FNEG is defined as subtraction from -0.0.
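; For any non-NaN input, "-0.0 - x" is exactly x with its sign bit flipped
; (bits(-0.0 - x) == bits(x) ^ 0x80000000 for a float); IEEE-754 leaves the
; sign of a NaN result unspecified, which is why lowering the subtraction to
; a plain sign-bit xor is still legal.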

; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: t1:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: t1:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
  %tmp = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %Q
  ret <4 x float> %tmp
}

; Negating undef folds away entirely, so these tests verify that no sign-flip
; code is emitted. The scalar i686 cases still emit fldz because the x87
; calling convention has to return something on the FP stack.

define float @scalar_fsub_neg0_undef(float %x) nounwind {
; X86-SSE-LABEL: scalar_fsub_neg0_undef:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    fldz
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: scalar_fsub_neg0_undef:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fsub float -0.0, undef
  ret float %r
}

define float @scalar_fneg_undef(float %x) nounwind {
; X86-SSE-LABEL: scalar_fneg_undef:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    fldz
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: scalar_fneg_undef:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fneg float undef
  ret float %r
}

define <4 x float> @fsub_neg0_undef(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fsub_neg0_undef:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fsub_neg0_undef:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, undef
  ret <4 x float> %r
}

define <4 x float> @fneg_undef(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fneg_undef:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_undef:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fneg <4 x float> undef
  ret <4 x float> %r
}

define <4 x float> @fsub_neg0_undef_elts_undef(<4 x float> %x) {
; X86-SSE-LABEL: fsub_neg0_undef_elts_undef:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fsub_neg0_undef_elts_undef:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fsub <4 x float> <float -0.0, float undef, float undef, float -0.0>, undef
  ret <4 x float> %r
}

; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg.
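; e.g. for x = +0.0, fneg gives -0.0 while "0.0 - x" gives +0.0 under the
; default rounding mode, so a real subtraction must be kept unless nsz-style
; fast-math flags are present.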
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: t2:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    xorps %xmm1, %xmm1
; X86-SSE-NEXT:    subps %xmm0, %xmm1
; X86-SSE-NEXT:    movaps %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: t2:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps %xmm1, %xmm1
; X64-SSE-NEXT:    subps %xmm0, %xmm1
; X64-SSE-NEXT:    movaps %xmm1, %xmm0
; X64-SSE-NEXT:    retq
  %tmp = fsub <4 x float> zeroinitializer, %Q
  ret <4 x float> %tmp
}

; If we're bitcasting an integer to an FP vector, we should avoid the FPU/vector unit entirely.
; Make sure that we're flipping the sign bit and only the sign bit of each float.
; So instead of something like this:
;    movd    %rdi, %xmm0
;    xorps   .LCPI2_0(%rip), %xmm0
;
; We should generate:
;    movabsq     (put sign bit mask in integer register)
;    xorq        (flip sign bits)
;    movd        (move to xmm return register)
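;
; A single 64-bit mask (0x8000000080000000) flips the sign of both f32 lanes
; at once, so one integer xor replaces a constant-pool load plus a vector xor.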

define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X86-SSE1-LABEL: fneg_bitcast:
; X86-SSE1:       # %bb.0:
; X86-SSE1-NEXT:    pushl %ebp
; X86-SSE1-NEXT:    movl %esp, %ebp
; X86-SSE1-NEXT:    andl $-16, %esp
; X86-SSE1-NEXT:    subl $16, %esp
; X86-SSE1-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE1-NEXT:    movl 12(%ebp), %ecx
; X86-SSE1-NEXT:    xorl %eax, %ecx
; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT:    xorl 8(%ebp), %eax
; X86-SSE1-NEXT:    movl %eax, (%esp)
; X86-SSE1-NEXT:    movaps (%esp), %xmm0
; X86-SSE1-NEXT:    movl %ebp, %esp
; X86-SSE1-NEXT:    popl %ebp
; X86-SSE1-NEXT:    retl
;
; X86-SSE2-LABEL: fneg_bitcast:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT:    xorl %eax, %ecx
; X86-SSE2-NEXT:    movd %ecx, %xmm1
; X86-SSE2-NEXT:    xorl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT:    movd %eax, %xmm0
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT:    retl
;
; X64-SSE1-LABEL: fneg_bitcast:
; X64-SSE1:       # %bb.0:
; X64-SSE1-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT:    xorq %rdi, %rax
; X64-SSE1-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; X64-SSE1-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE1-NEXT:    retq
;
; X64-SSE2-LABEL: fneg_bitcast:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT:    xorq %rdi, %rax
; X64-SSE2-NEXT:    movq %rax, %xmm0
; X64-SSE2-NEXT:    retq
  %bitcast = bitcast i64 %i to <2 x float>
  %fneg = fsub <2 x float> <float -0.0, float -0.0>, %bitcast
  ret <2 x float> %fneg
}

define <4 x float> @fneg_undef_elts_v4f32(<4 x float> %x) {
; X86-SSE-LABEL: fneg_undef_elts_v4f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_undef_elts_v4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
  %r = fsub <4 x float> <float -0.0, float undef, float undef, float -0.0>, %x
  ret <4 x float> %r
}

; This isn't fneg, but similarly check that (X - 0.0) is simplified.
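; (X - 0.0 is an identity for every X, including X = -0.0; note that X + 0.0
; would not be, since -0.0 + 0.0 rounds to +0.0.)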

define <4 x float> @fsub0_undef_elts_v4f32(<4 x float> %x) {
; X86-SSE-LABEL: fsub0_undef_elts_v4f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fsub0_undef_elts_v4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    retq
  %r = fsub <4 x float> %x, <float 0.0, float undef, float 0.0, float undef>
  ret <4 x float> %r
}

define <4 x float> @fneg(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fneg:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
  %tmp = fneg <4 x float> %Q
  ret <4 x float> %tmp
}

; store(fneg(load())) - convert scalar to integer
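; The sign bit is the MSB of the value, so on this little-endian target an
; xorb of 0x80 at offset size-1 flips it in place: offset 1 for the 2-byte
; half, offset 3 for the 4-byte float below.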
define void @fneg_int_rmw_half(ptr %ptr) nounwind {
; X86-SSE-LABEL: fneg_int_rmw_half:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    xorb $-128, 1(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_int_rmw_half:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorb $-128, 1(%rdi)
; X64-SSE-NEXT:    retq
  %1 = load half, ptr %ptr
  %2 = fneg half %1
  store half %2, ptr %ptr
  ret void
}

define void @fneg_int_bfloat(ptr %src, ptr %dst) nounwind {
; X86-SSE-LABEL: fneg_int_bfloat:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movzwl (%ecx), %ecx
; X86-SSE-NEXT:    xorl $32768, %ecx # imm = 0x8000
; X86-SSE-NEXT:    movw %cx, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_int_bfloat:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzwl (%rdi), %eax
; X64-SSE-NEXT:    xorl $32768, %eax # imm = 0x8000
; X64-SSE-NEXT:    movw %ax, (%rsi)
; X64-SSE-NEXT:    retq
  %1 = load bfloat, ptr %src
  %2 = fneg bfloat %1
  store bfloat %2, ptr %dst
  ret void
}

define void @fneg_int_rmw_f32(ptr %ptr) {
; X86-SSE-LABEL: fneg_int_rmw_f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    xorb $-128, 3(%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_int_rmw_f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    xorb $-128, 3(%rdi)
; X64-SSE-NEXT:    retq
  %1 = load float, ptr %ptr
  %2 = fneg float %1
  store float %2, ptr %ptr
  ret void
}

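; f64 needs a different strategy per subtarget: i686/SSE1 has no 64-bit
; integer xor and no f64 vector ops, so it falls back to x87 fchs; i686/SSE2
; can use a vector xorps; x86-64 does it with one 64-bit integer xor.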
define void @fneg_int_f64(ptr %src, ptr %dst) {
; X86-SSE1-LABEL: fneg_int_f64:
; X86-SSE1:       # %bb.0:
; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE1-NEXT:    fldl (%ecx)
; X86-SSE1-NEXT:    fchs
; X86-SSE1-NEXT:    fstpl (%eax)
; X86-SSE1-NEXT:    retl
;
; X86-SSE2-LABEL: fneg_int_f64:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT:    movlps %xmm0, (%eax)
; X86-SSE2-NEXT:    retl
;
; X64-SSE-LABEL: fneg_int_f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; X64-SSE-NEXT:    xorq (%rdi), %rax
; X64-SSE-NEXT:    movq %rax, (%rsi)
; X64-SSE-NEXT:    retq
  %1 = load double, ptr %src
  %2 = fneg double %1
  store double %2, ptr %dst
  ret void
}

; don't convert vector to scalar
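; A whole-vector load/xorps/store already negates all four lanes with one
; sign-mask constant; splitting this into scalar integer xors would only add
; instructions.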
define void @fneg_int_v4f32(ptr %src, ptr %dst) {
; X86-SSE-LABEL: fneg_int_v4f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT:    movaps (%ecx), %xmm0
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    movaps %xmm0, (%eax)
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: fneg_int_v4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps (%rdi), %xmm0
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    movaps %xmm0, (%rsi)
; X64-SSE-NEXT:    retq
  %1 = load <4 x float>, ptr %src
  %2 = fneg <4 x float> %1
  store <4 x float> %2, ptr %dst
  ret void
}