; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=sse | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX

; With no-wrap:
; (X * Y) == 0 --> (X == 0) || (Y == 0)
; (X * Y) != 0 --> (X != 0) && (Y != 0)
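; The no-wrap flags are what make this fold sound: a wrapping product can be
; zero even when neither operand is (e.g. i8 16 * 16 = 256 truncates to 0),
; so the plain 'mul' case below must keep the multiply.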

define i1 @mul_nsw_eq0_i8(i8 %x, i8 %y) {
; CHECK-LABEL: mul_nsw_eq0_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testb %sil, %sil
; CHECK-NEXT:    sete %cl
; CHECK-NEXT:    testb %dil, %dil
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    orb %cl, %al
; CHECK-NEXT:    retq
  %m = mul nsw i8 %x, %y
  %r = icmp eq i8 %m, 0
  ret i1 %r
}

; negative test - not valid if mul can overflow

define i1 @mul_eq0_i8(i8 %x, i8 %y) {
; CHECK-LABEL: mul_eq0_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    mulb %sil
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %m = mul i8 %x, %y
  %r = icmp eq i8 %m, 0
  ret i1 %r
}

; negative test - don't try with minsize

define i1 @mul_nsw_eq0_i8_size(i8 %x, i8 %y) minsize {
; CHECK-LABEL: mul_nsw_eq0_i8_size:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    mulb %sil
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %m = mul nsw i8 %x, %y
  %r = icmp eq i8 %m, 0
  ret i1 %r
}

define i1 @mul_nsw_ne0_i16(i16 %x, i16 %y) {
; CHECK-LABEL: mul_nsw_ne0_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testw %si, %si
; CHECK-NEXT:    setne %cl
; CHECK-NEXT:    testw %di, %di
; CHECK-NEXT:    setne %al
; CHECK-NEXT:    andb %cl, %al
; CHECK-NEXT:    retq
  %m = mul nsw i16 %x, %y
  %r = icmp ne i16 %m, 0
  ret i1 %r
}

define i1 @mul_nuw_eq0_i32(i32 %x, i32 %y) {
; CHECK-LABEL: mul_nuw_eq0_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    sete %cl
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    orb %cl, %al
; CHECK-NEXT:    retq
  %m = mul nuw i32 %x, %y
  %r = icmp eq i32 %m, 0
  ret i1 %r
}

define i1 @mul_nsw_nuw_ne0_i64(i64 %x, i64 %y) {
; CHECK-LABEL: mul_nsw_nuw_ne0_i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    testq %rsi, %rsi
; CHECK-NEXT:    setne %cl
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    setne %al
; CHECK-NEXT:    andb %cl, %al
; CHECK-NEXT:    retq
  %m = mul nsw nuw i64 %x, %y
  %r = icmp ne i64 %m, 0
  ret i1 %r
}

define <16 x i1> @mul_nuw_eq0_v16i8(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: mul_nuw_eq0_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqb %xmm2, %xmm1
; SSE-NEXT:    pcmpeqb %xmm2, %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_nuw_eq0_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %m = mul nuw <16 x i8> %x, %y
  %r = icmp eq <16 x i8> %m, zeroinitializer
  ret <16 x i1> %r
}

define <4 x i1> @mul_nsw_ne0_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: mul_nsw_ne0_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqd %xmm2, %xmm1
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_nsw_ne0_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %m = mul nsw <4 x i32> %x, %y
  %r = icmp ne <4 x i32> %m, zeroinitializer
  ret <4 x i1> %r
}

; negative test - don't try with minsize
; TODO: SSE would be much smaller if decomposed.

define <4 x i1> @mul_nsw_ne0_v4i32_size(<4 x i32> %x, <4 x i32> %y) minsize {
; SSE-LABEL: mul_nsw_ne0_v4i32_size:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm2, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: mul_nsw_ne0_v4i32_size:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %m = mul nsw <4 x i32> %x, %y
  %r = icmp ne <4 x i32> %m, zeroinitializer
  ret <4 x i1> %r
}