; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare void @use(i8)
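
; The tests below mask a multiply by a constant and compare against a constant.
; For example, 44 = 4 * 11 with 11 odd, so bit 2 of (x * 44) equals bit 0 of x,
; and the first compare reduces to a test of the low bit of x.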

define i1 @mul_mask_pow2_eq0(i8 %x) {
; CHECK-LABEL: @mul_mask_pow2_eq0(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 44
  %and = and i8 %mul, 4
  %cmp = icmp eq i8 %and, 0
  ret i1 %cmp
}

; TODO: Demanded bits does not convert the mul to shift,
; but the 'and' could be of 'x' directly.
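; (40 = 8 * 5 with 5 odd, so bit 3 of (x * 40) equals bit 0 of x; the mul has
; another use, so it is not simplified, but the compare could still test x.)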

define i1 @mul_mask_pow2_ne0_use1(i8 %x) {
; CHECK-LABEL: @mul_mask_pow2_ne0_use1(
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[X:%.*]], 40
; CHECK-NEXT:    call void @use(i8 [[MUL]])
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MUL]], 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 40
  call void @use(i8 %mul)
  %and = and i8 %mul, 8
  %cmp = icmp ne i8 %and, 0
  ret i1 %cmp
}

; negative test - extra use of 'and' would require more instructions
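; (the 'and' also feeds the call, so it must be kept; demanded bits still
; narrows the single-use mul to a shift.)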

define i1 @mul_mask_pow2_ne0_use2(i8 %x) {
; CHECK-LABEL: @mul_mask_pow2_ne0_use2(
; CHECK-NEXT:    [[MUL:%.*]] = shl i8 [[X:%.*]], 3
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MUL]], 8
; CHECK-NEXT:    call void @use(i8 [[AND]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 40
  %and = and i8 %mul, 8
  call void @use(i8 %and)
  %cmp = icmp ne i8 %and, 0
  ret i1 %cmp
}

; non-equality predicates are converted to equality
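; (the masked value is either 0 or 4, so 'sgt 0' is equivalent to 'ne 0'.)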

define i1 @mul_mask_pow2_sgt0(i8 %x) {
; CHECK-LABEL: @mul_mask_pow2_sgt0(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 44
  %and = and i8 %mul, 4
  %cmp = icmp sgt i8 %and, 0
  ret i1 %cmp
}

; unnecessary mask bits are removed
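; (44 is even, so bit 0 of the product is known zero and mask 5 acts like mask 4.)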

define i1 @mul_mask_fakepow2_ne0(i8 %x) {
; CHECK-LABEL: @mul_mask_fakepow2_ne0(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 44
  %and = and i8 %mul, 5
  %cmp = icmp ne i8 %and, 0
  ret i1 %cmp
}

; non-zero cmp constant is converted
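; (the masked value is either 0 or 4, so 'eq 4' is the same as 'ne 0'.)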

define i1 @mul_mask_pow2_eq4(i8 %x) {
; CHECK-LABEL: @mul_mask_pow2_eq4(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 44
  %and = and i8 %mul, 4
  %cmp = icmp eq i8 %and, 4
  ret i1 %cmp
}

; negative test - must be pow2 mask constant
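; (mask 12 has two bits set; demanded bits still shrinks the multiplier from
; 60 to 12, since 60 == 12 (mod 16).)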

define i1 @mul_mask_notpow2_ne(i8 %x) {
; CHECK-LABEL: @mul_mask_notpow2_ne(
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[X:%.*]], 12
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MUL]], 12
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i8 %x, 60
  %and = and i8 %mul, 12
  %cmp = icmp ne i8 %and, 0
  ret i1 %cmp
}
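
; The pr40493 tests repeat the fold with i32 and vector types:
; 12 = 4 * 3 with 3 odd, so bit 2 of (area * 12) equals bit 0 of area.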

define i1 @pr40493(i32 %area) {
; CHECK-LABEL: @pr40493(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[AREA:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i32 %area, 12
  %rem = and i32 %mul, 4
  %cmp = icmp eq i32 %rem, 0
  ret i1 %cmp
}
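
; negative test - 11 is odd with no trailing zero bits, so bit 2 of (area * 11)
; depends on bits 0..2 of area; only the multiplier is shrunk to 3 (11 mod 8).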

define i1 @pr40493_neg1(i32 %area) {
; CHECK-LABEL: @pr40493_neg1(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AREA:%.*]], 3
; CHECK-NEXT:    [[REM:%.*]] = and i32 [[MUL]], 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i32 %area, 11
  %rem = and i32 %mul, 4
  %cmp = icmp eq i32 %rem, 0
  ret i1 %cmp
}
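
; negative test - the mask (15) is not a power of 2; the known-zero low bits of
; (area * 12) only shrink the mask to 12.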

define i1 @pr40493_neg2(i32 %area) {
; CHECK-LABEL: @pr40493_neg2(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AREA:%.*]], 12
; CHECK-NEXT:    [[REM:%.*]] = and i32 [[MUL]], 12
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[REM]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %mul = mul i32 %area, 12
  %rem = and i32 %mul, 15
  %cmp = icmp eq i32 %rem, 0
  ret i1 %cmp
}
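
; no compare here - only demanded bits applies, so the mul by 12 becomes a shift by 2.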

define i32 @pr40493_neg3(i32 %area) {
; CHECK-LABEL: @pr40493_neg3(
; CHECK-NEXT:    [[MUL:%.*]] = shl i32 [[AREA:%.*]], 2
; CHECK-NEXT:    [[REM:%.*]] = and i32 [[MUL]], 4
; CHECK-NEXT:    ret i32 [[REM]]
;
  %mul = mul i32 %area, 12
  %rem = and i32 %mul, 4
  ret i32 %rem
}
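
; splat vector constants fold the same way as the scalar @pr40493.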

define <4 x i1> @pr40493_vec1(<4 x i32> %area) {
; CHECK-LABEL: @pr40493_vec1(
; CHECK-NEXT:    [[TMP1:%.*]] = and <4 x i32> [[AREA:%.*]], splat (i32 1)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[CMP]]
;
  %mul = mul <4 x i32> %area, <i32 12, i32 12, i32 12, i32 12>
  %rem = and <4 x i32> %mul, <i32 4, i32 4, i32 4, i32 4>
  %cmp = icmp eq <4 x i32> %rem, zeroinitializer
  ret <4 x i1> %cmp
}
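
; vec2-vec4 have undef elements in the mul and/or mask constants; the CHECK
; lines show that the icmp fold does not fire for these.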

define <4 x i1> @pr40493_vec2(<4 x i32> %area) {
; CHECK-LABEL: @pr40493_vec2(
; CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i32> [[AREA:%.*]], <i32 12, i32 12, i32 12, i32 undef>
; CHECK-NEXT:    [[REM:%.*]] = and <4 x i32> [[MUL]], splat (i32 4)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i32> [[REM]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[CMP]]
;
  %mul = mul <4 x i32> %area, <i32 12, i32 12, i32 12, i32 undef>
  %rem = and <4 x i32> %mul, <i32 4, i32 4, i32 4, i32 4>
  %cmp = icmp eq <4 x i32> %rem, zeroinitializer
  ret <4 x i1> %cmp
}

define <4 x i1> @pr40493_vec3(<4 x i32> %area) {
; CHECK-LABEL: @pr40493_vec3(
; CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i32> [[AREA:%.*]], splat (i32 12)
; CHECK-NEXT:    [[REM:%.*]] = and <4 x i32> [[MUL]], <i32 4, i32 4, i32 4, i32 undef>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i32> [[REM]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[CMP]]
;
  %mul = mul <4 x i32> %area, <i32 12, i32 12, i32 12, i32 12>
  %rem = and <4 x i32> %mul, <i32 4, i32 4, i32 4, i32 undef>
  %cmp = icmp eq <4 x i32> %rem, zeroinitializer
  ret <4 x i1> %cmp
}

define <4 x i1> @pr40493_vec4(<4 x i32> %area) {
; CHECK-LABEL: @pr40493_vec4(
; CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i32> [[AREA:%.*]], <i32 12, i32 12, i32 12, i32 undef>
; CHECK-NEXT:    [[REM:%.*]] = and <4 x i32> [[MUL]], <i32 4, i32 4, i32 4, i32 undef>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i32> [[REM]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[CMP]]
;
  %mul = mul <4 x i32> %area, <i32 12, i32 12, i32 12, i32 undef>
  %rem = and <4 x i32> %mul, <i32 4, i32 4, i32 4, i32 undef>
  %cmp = icmp eq <4 x i32> %rem, zeroinitializer
  ret <4 x i1> %cmp
}
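
; non-splat (per-element) constants are not folded.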

define <4 x i1> @pr40493_vec5(<4 x i32> %area) {
; CHECK-LABEL: @pr40493_vec5(
; CHECK-NEXT:    [[MUL:%.*]] = mul <4 x i32> [[AREA:%.*]], <i32 12, i32 12, i32 20, i32 20>
; CHECK-NEXT:    [[REM:%.*]] = and <4 x i32> [[MUL]], <i32 2, i32 4, i32 2, i32 4>
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <4 x i32> [[REM]], zeroinitializer
; CHECK-NEXT:    ret <4 x i1> [[CMP]]
;
  %mul = mul <4 x i32> %area, <i32 12, i32 12, i32 20, i32 20>
  %rem = and <4 x i32> %mul, <i32 2, i32 4, i32 2, i32 4>
  %cmp = icmp eq <4 x i32> %rem, zeroinitializer
  ret <4 x i1> %cmp
}
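
; the multiplier (y & -7) | 1 is congruent to 1 (mod 8): the 'and' clears bits
; 1 and 2 and the 'or' sets bit 0, so the low bits of the product are just the
; low bits of %x and the mask can be applied to %x directly.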

define i1 @pr51551(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 3
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %t0 = and i32 %y, -7
  %t1 = or i32 %t0, 1
  %mul = mul nsw i32 %t1, %x
  %and = and i32 %mul, 3
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}
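
; same as above with mask 1: bit 0 of the product equals bit 0 of %x.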

define i1 @pr51551_2(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_2(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %t0 = and i32 %y, -7
  %t1 = or i32 %t0, 1
  %mul = mul nsw i32 %t1, %x
  %and = and i32 %mul, 1
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}
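
; negative test - masking %y with -3 clears only bit 1, so bit 2 of the
; multiplier is unknown (1 or 5 mod 8) and the low three bits of the product
; are not simply those of %x; demanded bits still shrinks the mask on %y to 4.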

define i1 @pr51551_neg1(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_neg1(
; CHECK-NEXT:    [[T0:%.*]] = and i32 [[Y:%.*]], 4
; CHECK-NEXT:    [[T1:%.*]] = or disjoint i32 [[T0]], 1
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[T1]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[MUL]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %t0 = and i32 %y, -3
  %t1 = or i32 %t0, 1
  %mul = mul nsw i32 %t1, %x
  %and = and i32 %mul, 7
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}
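
; without the 'or 1', the multiplier (y & -7) is 0 or 1 (mod 8) depending on
; bit 0 of %y, so the compare becomes a select: true when bit 0 of %y is clear
; (the low bits of the product are zero), otherwise a test of x & 7.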

define i1 @pr51551_neg2(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_neg2(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[Y:%.*]] to i1
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X:%.*]], 7
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT:    [[DOTNOT:%.*]] = xor i1 [[TMP1]], true
; CHECK-NEXT:    [[CMP:%.*]] = select i1 [[DOTNOT]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %t0 = and i32 %y, -7
  %mul = mul nsw i32 %t0, %x
  %and = and i32 %mul, 7
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}
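
; no compare - the multiplier is 1 (mod 8), so the three demanded bits of the
; product are just x & 7.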

define i32 @pr51551_demand3bits(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_demand3bits(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 7
; CHECK-NEXT:    ret i32 [[AND]]
;
  %t0 = and i32 %y, -7
  %t1 = or i32 %t0, 1
  %mul = mul nsw i32 %t1, %x
  %and = and i32 %mul, 7
  ret i32 %and
}