; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX512VL

; fold (abs c1) -> c2
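; The DAG combiner evaluates llvm.abs at compile time when the operand is a
; build_vector of constants, so these tests should fold to a single constant
; load with no pabs* instruction. With the int_min_is_poison flag set to
; false, abs(-2147483648) wraps back to INT_MIN, printed here as the unsigned
; value 2147483648.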
define <4 x i32> @combine_v4i32_abs_constant() {
; SSE-LABEL: combine_v4i32_abs_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,1,3,2147483648]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_v4i32_abs_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,1,3,2147483648]
; AVX-NEXT:    retq
  %1 = call <4 x i32> @llvm.abs.v4i32(<4 x i32> <i32 0, i32 -1, i32 3, i32 -2147483648>, i1 false)
  ret <4 x i32> %1
}

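; Note the last two lanes: 32768 and 65536 do not fit in i16's signed range
; and wrap to -32768 and 0 when parsed, which is why the folded constant ends
; in 32768,0. SSE lacks 256-bit registers, so the result needs two xmm
; constants; AVX loads one ymm constant.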
define <16 x i16> @combine_v16i16_abs_constant() {
; SSE-LABEL: combine_v16i16_abs_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,1,1,3,3,7,7,255]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [255,4096,4096,32767,32767,32768,32768,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_v16i16_abs_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,1,1,3,3,7,7,255,255,4096,4096,32767,32767,32768,32768,0]
; AVX-NEXT:    retq
  %1 = call <16 x i16> @llvm.abs.v16i16(<16 x i16> <i16 0, i16 1, i16 -1, i16 3, i16 -3, i16 7, i16 -7, i16 255, i16 -255, i16 4096, i16 -4096, i16 32767, i16 -32767, i16 -32768, i16 32768, i16 65536>, i1 false)
  ret <16 x i16> %1
}

; fold (abs (abs x)) -> (abs x)
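; The scalar test spells both abs operations as the expanded neg/icmp/select
; idiom; SelectionDAG should match each to ISD::ABS, fold the nested abs
; away, and emit a single neg + cmovs sequence.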
define i32 @combine_i32_abs_abs(i32 %a) {
; CHECK-LABEL: combine_i32_abs_abs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    negl %eax
; CHECK-NEXT:    cmovsl %edi, %eax
; CHECK-NEXT:    retq
  %n1 = sub i32 0, %a
  %b1 = icmp slt i32 %a, 0
  %a1 = select i1 %b1, i32 %n1, i32 %a
  %n2 = sub i32 0, %a1
  %b2 = icmp sgt i32 %a1, 0
  %a2 = select i1 %b2, i32 %a1, i32 %n2
  ret i32 %a2
}

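; Here the outer abs uses the classic branchless idiom: with s = x >>s 15,
; (x + s) ^ s == |x|. DAGCombine recognizes this as another abs and folds it
; into the intrinsic. SSE2 lacks SSSE3's pabsw, so i16 abs is lowered as
; pmaxsw(x, 0 - x).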
define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
; SSE2-LABEL: combine_v8i16_abs_abs:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    psubw %xmm0, %xmm1
; SSE2-NEXT:    pmaxsw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: combine_v8i16_abs_abs:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pabsw %xmm0, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: combine_v8i16_abs_abs:
; AVX:       # %bb.0:
; AVX-NEXT:    vpabsw %xmm0, %xmm0
; AVX-NEXT:    retq
  %a1 = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
  %s2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %a2 = add <8 x i16> %a1, %s2
  %x2 = xor <8 x i16> %a2, %s2
  ret <8 x i16> %x2
}

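; SSE2 has neither pabsb nor pmaxsb, so i8 abs is lowered as pminub(x, 0 - x):
; viewed unsigned, the non-negative one of x and -x is the smaller, so the
; unsigned min selects |x|. The 256-bit vector is split across two xmm
; registers, while AVX2 handles it with a single vpabsb on ymm.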
define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
; SSE2-LABEL: combine_v32i8_abs_abs:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    psubb %xmm0, %xmm3
; SSE2-NEXT:    pminub %xmm3, %xmm0
; SSE2-NEXT:    psubb %xmm1, %xmm2
; SSE2-NEXT:    pminub %xmm2, %xmm1
; SSE2-NEXT:    retq
;
; SSE42-LABEL: combine_v32i8_abs_abs:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pabsb %xmm0, %xmm0
; SSE42-NEXT:    pabsb %xmm1, %xmm1
; SSE42-NEXT:    retq
;
; AVX-LABEL: combine_v32i8_abs_abs:
; AVX:       # %bb.0:
; AVX-NEXT:    vpabsb %ymm0, %ymm0
; AVX-NEXT:    retq
  %n1 = sub <32 x i8> zeroinitializer, %a
  %b1 = icmp slt <32 x i8> %a, zeroinitializer
  %a1 = select <32 x i1> %b1, <32 x i8> %n1, <32 x i8> %a
  %a2 = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a1, i1 false)
  ret <32 x i8> %a2
}

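; No pabsq exists below AVX512. SSE2 builds a per-element sign mask m by
; splatting each upper dword with pshufd and psrad $31, then computes
; (x ^ m) - m. SSE42 selects between x and 0 - x with blendvpd, which keys on
; the sign bit of each 64-bit lane. AVX512F only provides vpabsq on zmm,
; hence the widen/extract around it; AVX512VL uses vpabsq on ymm directly.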
define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
; SSE2-LABEL: combine_v4i64_abs_abs:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    psubq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    psubq %xmm2, %xmm1
; SSE2-NEXT:    retq
;
; SSE42-LABEL: combine_v4i64_abs_abs:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa %xmm0, %xmm2
; SSE42-NEXT:    pxor %xmm3, %xmm3
; SSE42-NEXT:    pxor %xmm4, %xmm4
; SSE42-NEXT:    psubq %xmm0, %xmm4
; SSE42-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
; SSE42-NEXT:    psubq %xmm1, %xmm3
; SSE42-NEXT:    movdqa %xmm1, %xmm0
; SSE42-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
; SSE42-NEXT:    movapd %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: combine_v4i64_abs_abs:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm1
; AVX2-NEXT:    vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT:    vpabsq %zmm0, %zmm0
; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: combine_v4i64_abs_abs:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpabsq %ymm0, %ymm0
; AVX512VL-NEXT:    retq
  %n1 = sub <4 x i64> zeroinitializer, %a
  %b1 = icmp slt <4 x i64> %a, zeroinitializer
  %a1 = select <4 x i1> %b1, <4 x i64> %n1, <4 x i64> %a
  %n2 = sub <4 x i64> zeroinitializer, %a1
  %b2 = icmp sgt <4 x i64> %a1, zeroinitializer
  %a2 = select <4 x i1> %b2, <4 x i64> %a1, <4 x i64> %n2
  ret <4 x i64> %a2
}

; fold (abs x) -> x iff not-negative
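; Masking each byte with 15 clears the sign bits, so the input is provably
; non-negative and the abs should disappear, leaving only the and.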
define <16 x i8> @combine_v16i8_abs_constant(<16 x i8> %a) {
; SSE-LABEL: combine_v16i8_abs_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: combine_v16i8_abs_constant:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: combine_v16i8_abs_constant:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: combine_v16i8_abs_constant:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VL-NEXT:    retq
  %1 = insertelement <16 x i8> undef, i8 15, i32 0
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
  %3 = and <16 x i8> %a, %2
  %4 = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %3, i1 false)
  ret <16 x i8> %4
}

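; Likewise, a logical shift right by 1 clears each element's sign bit, so the
; abs folds away and only the psrld survives.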
define <8 x i32> @combine_v8i32_abs_pos(<8 x i32> %a) {
; SSE-LABEL: combine_v8i32_abs_pos:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $1, %xmm0
; SSE-NEXT:    psrld $1, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_v8i32_abs_pos:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %2 = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %1, i1 false)
  ret <8 x i32> %2
}

; (abs x) upper bits are known zero if x has extra sign bits
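; After an arithmetic shift right by 15, |x| fits in 17 bits, so bits 19..31
; are known zero and the and with 0xFFF80000 folds to 0. The v8i16 variant
; shifts each lane by at least 7, leaving bit 15 of |x| known zero as well.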
define i32 @combine_i32_abs_zerosign(i32 %a) {
; CHECK-LABEL: combine_i32_abs_zerosign:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = ashr i32 %a, 15
  %2 = call i32 @llvm.abs.i32(i32 %1, i1 false)
  %3 = and i32 %2, -524288 ; 0xFFF80000
  ret i32 %3
}

define <8 x i16> @combine_v8i16_abs_zerosign(<8 x i16> %a) {
; SSE-LABEL: combine_v8i16_abs_zerosign:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_v8i16_abs_zerosign:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <8 x i16> %a, <i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14>
  %2 = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %1, i1 false)
  %3 = and <8 x i16> %2, <i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768, i16 32768>
  ret <8 x i16> %3
}

; negative test - mask extends beyond known zero bits
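; With only a 3-bit shift, |x| may reach 2^28, so bits 19..28 of the mask
; overlap possibly-set bits and the and must remain. The combiner still trims
; the provably-zero high bits of the mask: 0xFFF80000 becomes 0x1FF80000
; (536346624) in the generated code.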
define i32 @combine_i32_abs_zerosign_negative(i32 %a) {
; CHECK-LABEL: combine_i32_abs_zerosign_negative:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sarl $3, %edi
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    negl %eax
; CHECK-NEXT:    cmovsl %edi, %eax
; CHECK-NEXT:    andl $536346624, %eax # imm = 0x1FF80000
; CHECK-NEXT:    retq
  %1 = ashr i32 %a, 3
  %2 = call i32 @llvm.abs.i32(i32 %1, i1 false)
  %3 = and i32 %2, -524288 ; 0xFFF80000
  ret i32 %3
}

declare i32 @llvm.abs.i32(i32, i1) nounwind readnone

declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) nounwind readnone
declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) nounwind readnone
declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) nounwind readnone

declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) nounwind readnone
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) nounwind readnone
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) nounwind readnone