; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2                  < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI1
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi             < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI1
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2       < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI2
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI2,AVX2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2                  < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi             < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2       < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI2,AVX2

; We are looking for the following pattern here:
;   (X & (C << Y)) ==/!= 0
; It may be optimal to hoist the constant:
;   ((X l>> Y) & C) ==/!= 0
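;
; For illustration only (a hand-written sketch, not a RUN-verified test; the
; value names %xsrl and %masked are invented for this example), the i8 signbit
; case below
;   %t0 = shl i8 128, %y
;   %t1 = and i8 %t0, %x
;   %res = icmp eq i8 %t1, 0
; would, with the constant hoisted out of the shift, become
;   %xsrl = lshr i8 %x, %y
;   %masked = and i8 %xsrl, 128
;   %res = icmp eq i8 %masked, 0
; which is what the scalar checks below (shift of %x, then test against the
; constant) are verifying.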

;------------------------------------------------------------------------------;
; A few scalar tests
;------------------------------------------------------------------------------;

; i8 scalar

define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shrb %cl, %al
; X86-NEXT:    testb $-128, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrb %cl, %dil
; X64-NEXT:    testb $-128, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_lowestbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    btl %eax, %ecx
; X86-NEXT:    setae %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_lowestbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    btl %esi, %edi
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = shl i8 1, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_bitsinmiddle_eq:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shrb %cl, %al
; X86-NEXT:    testb $24, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_bitsinmiddle_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrb %cl, %dil
; X64-NEXT:    testb $24, %dil
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = shl i8 24, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 0
  ret i1 %res
}

; i16 scalar

define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    testl $32768, %eax # imm = 0x8000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i16_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    shrxl %ecx, %eax, %eax
; X86-BMI2-NEXT:    testl $32768, %eax # imm = 0x8000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i16_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    movzwl %di, %eax
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shrl %cl, %eax
; X64-BMI1-NEXT:    testl $32768, %eax # imm = 0x8000
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i16_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    movzwl %di, %eax
; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
; X64-BMI2-NEXT:    testl $32768, %eax # imm = 0x8000
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i16 32768, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
; X86-LABEL: scalar_i16_lowestbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    btl %eax, %ecx
; X86-NEXT:    setae %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i16_lowestbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    btl %esi, %edi
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = shl i16 1, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
; X86-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    testl $4080, %eax # imm = 0xFF0
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    shrxl %ecx, %eax, %eax
; X86-BMI2-NEXT:    testl $4080, %eax # imm = 0xFF0
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    movzwl %di, %eax
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shrl %cl, %eax
; X64-BMI1-NEXT:    testl $4080, %eax # imm = 0xFF0
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i16_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    movzwl %di, %eax
; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
; X64-BMI2-NEXT:    testl $4080, %eax # imm = 0xFF0
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i16 4080, %y
  %t1 = and i16 %t0, %x
  %res = icmp eq i16 %t1, 0
  ret i1 %res
}

; i32 scalar

define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shrl %cl, %edi
; X64-BMI1-NEXT:    testl $-2147483648, %edi # imm = 0x80000000
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shrxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $-2147483648, %eax # imm = 0x80000000
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i32 2147483648, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
; X86-LABEL: scalar_i32_lowestbit_eq:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    btl %ecx, %eax
; X86-NEXT:    setae %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i32_lowestbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    btl %esi, %edi
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = shl i32 1, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %esi, %ecx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shrl %cl, %edi
; X64-BMI1-NEXT:    testl $16776960, %edi # imm = 0xFFFF00
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shrxl %esi, %edi, %eax
; X64-BMI2-NEXT:    testl $16776960, %eax # imm = 0xFFFF00
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i32 16776960, %y
  %t1 = and i32 %t0, %x
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

; i64 scalar

define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_signbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    shrl %cl, %eax
; X86-BMI1-NEXT:    xorl %edx, %edx
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovel %eax, %edx
; X86-BMI1-NEXT:    testl $-2147483648, %edx # imm = 0x80000000
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_signbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    shrxl %eax, {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    xorl %edx, %edx
; X86-BMI2-NEXT:    testb $32, %al
; X86-BMI2-NEXT:    cmovel %ecx, %edx
; X86-BMI2-NEXT:    testl $-2147483648, %edx # imm = 0x80000000
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i64_signbit_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movq %rsi, %rcx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT:    shrq %cl, %rdi
; X64-BMI1-NEXT:    btq $63, %rdi
; X64-BMI1-NEXT:    setae %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i64_signbit_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shrxq %rsi, %rdi, %rax
; X64-BMI2-NEXT:    btq $63, %rax
; X64-BMI2-NEXT:    setae %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i64 9223372036854775808, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    pushl %esi
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl $1, %eax
; X86-BMI1-NEXT:    xorl %esi, %esi
; X86-BMI1-NEXT:    xorl %edx, %edx
; X86-BMI1-NEXT:    shldl %cl, %eax, %edx
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovnel %eax, %edx
; X86-BMI1-NEXT:    cmovnel %esi, %eax
; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    orl %edx, %eax
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    popl %esi
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_lowestbit_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    pushl %esi
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    movl $1, %edx
; X86-BMI2-NEXT:    xorl %esi, %esi
; X86-BMI2-NEXT:    xorl %eax, %eax
; X86-BMI2-NEXT:    shldl %cl, %edx, %eax
; X86-BMI2-NEXT:    shlxl %ecx, %edx, %edx
; X86-BMI2-NEXT:    testb $32, %cl
; X86-BMI2-NEXT:    cmovnel %edx, %eax
; X86-BMI2-NEXT:    cmovnel %esi, %edx
; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT:    orl %eax, %edx
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    popl %esi
; X86-BMI2-NEXT:    retl
;
; X64-LABEL: scalar_i64_lowestbit_eq:
; X64:       # %bb.0:
; X64-NEXT:    btq %rsi, %rdi
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = shl i64 1, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
; X86-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    pushl %esi
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI1-NEXT:    movl %edx, %esi
; X86-BMI1-NEXT:    shrl %cl, %esi
; X86-BMI1-NEXT:    shrdl %cl, %edx, %eax
; X86-BMI1-NEXT:    xorl %edx, %edx
; X86-BMI1-NEXT:    testb $32, %cl
; X86-BMI1-NEXT:    cmovnel %esi, %eax
; X86-BMI1-NEXT:    cmovel %esi, %edx
; X86-BMI1-NEXT:    andl $-65536, %eax # imm = 0xFFFF0000
; X86-BMI1-NEXT:    movzwl %dx, %ecx
; X86-BMI1-NEXT:    orl %eax, %ecx
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    popl %esi
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    pushl %esi
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-BMI2-NEXT:    shrdl %cl, %edx, %eax
; X86-BMI2-NEXT:    shrxl %ecx, %edx, %edx
; X86-BMI2-NEXT:    xorl %esi, %esi
; X86-BMI2-NEXT:    testb $32, %cl
; X86-BMI2-NEXT:    cmovnel %edx, %eax
; X86-BMI2-NEXT:    cmovel %edx, %esi
; X86-BMI2-NEXT:    andl $-65536, %eax # imm = 0xFFFF0000
; X86-BMI2-NEXT:    movzwl %si, %ecx
; X86-BMI2-NEXT:    orl %eax, %ecx
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    popl %esi
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movq %rsi, %rcx
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $rcx
; X64-BMI1-NEXT:    shrq %cl, %rdi
; X64-BMI1-NEXT:    shrq $16, %rdi
; X64-BMI1-NEXT:    testl %edi, %edi
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i64_bitsinmiddle_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    shrxq %rsi, %rdi, %rax
; X64-BMI2-NEXT:    shrq $16, %rax
; X64-BMI2-NEXT:    testl %eax, %eax
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i64 281474976645120, %y
  %t1 = and i64 %t0, %x
  %res = icmp eq i64 %t1, 0
  ret i1 %res
}

;------------------------------------------------------------------------------;
; A few trivial vector tests
;------------------------------------------------------------------------------;

define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_splat_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pand %xmm1, %xmm0
; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
; X86-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_splat_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_splat_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pand %xmm1, %xmm0
; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
; X64-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X86-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT:    pand %xmm1, %xmm0
; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
; X86-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_nonsplat_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X64-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT:    pand %xmm1, %xmm0
; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
; X64-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = shl <4 x i32> <i32 0, i32 1, i32 16776960, i32 2147483648>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}

define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    movl $1, %eax
; X86-SSE2-NEXT:    movd %eax, %xmm2
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT:    pand %xmm2, %xmm0
; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
; X86-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef0_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    movl $1, %eax
; X64-SSE2-NEXT:    movd %eax, %xmm2
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE2-NEXT:    pand %xmm2, %xmm0
; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
; X64-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pand %xmm1, %xmm0
; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
; X86-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pand %xmm1, %xmm0
; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
; X64-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    movl $1, %eax
; X86-SSE2-NEXT:    movd %eax, %xmm2
; X86-SSE2-NEXT:    pslld $23, %xmm1
; X86-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT:    pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT:    pand %xmm2, %xmm0
; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
; X86-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; AVX2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    ret{{[l|q]}}
;
; X64-SSE2-LABEL: vec_4xi32_nonsplat_undef2_eq:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    movl $1, %eax
; X64-SSE2-NEXT:    movd %eax, %xmm2
; X64-SSE2-NEXT:    pslld $23, %xmm1
; X64-SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT:    pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; X64-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE2-NEXT:    pand %xmm2, %xmm0
; X64-SSE2-NEXT:    pxor %xmm1, %xmm1
; X64-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
  %t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
  %t1 = and <4 x i32> %t0, %x
  %res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
  ret <4 x i1> %res
}

;------------------------------------------------------------------------------;
; A special test
;------------------------------------------------------------------------------;

define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_ne:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shrb %cl, %al
; X86-NEXT:    shrb $7, %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_ne:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrb %cl, %al
; X64-NEXT:    shrb $7, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp ne i8 %t1, 0 ;  we are perfectly happy with 'ne' predicate
  ret i1 %res
}

;------------------------------------------------------------------------------;
; What if X is a constant too?
;------------------------------------------------------------------------------;
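;
; Note (an illustrative sketch, not an extra RUN-verified case): once X is a
; constant and C is 1, the pattern from @scalar_i32_x_is_const2_eq below
;   %t0 = shl i32 1, %y
;   %t1 = and i32 %t0, 2857740885
;   %res = icmp eq i32 %t1, 0
; is simply a query of bit %y of the constant 2857740885 (0xAA55AA55), and the
; checks expect it to lower to a 'bt' against that constant.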

define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
; X86-BMI1-LABEL: scalar_i32_x_is_const_eq:
; X86-BMI1:       # %bb.0:
; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-BMI1-NEXT:    movl $-1437226411, %eax # imm = 0xAA55AA55
; X86-BMI1-NEXT:    shll %cl, %eax
; X86-BMI1-NEXT:    testb $1, %al
; X86-BMI1-NEXT:    sete %al
; X86-BMI1-NEXT:    retl
;
; X86-BMI2-LABEL: scalar_i32_x_is_const_eq:
; X86-BMI2:       # %bb.0:
; X86-BMI2-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-BMI2-NEXT:    movl $-1437226411, %ecx # imm = 0xAA55AA55
; X86-BMI2-NEXT:    shlxl %eax, %ecx, %eax
; X86-BMI2-NEXT:    testb $1, %al
; X86-BMI2-NEXT:    sete %al
; X86-BMI2-NEXT:    retl
;
; X64-BMI1-LABEL: scalar_i32_x_is_const_eq:
; X64-BMI1:       # %bb.0:
; X64-BMI1-NEXT:    movl %edi, %ecx
; X64-BMI1-NEXT:    movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-BMI1-NEXT:    shll %cl, %eax
; X64-BMI1-NEXT:    testb $1, %al
; X64-BMI1-NEXT:    sete %al
; X64-BMI1-NEXT:    retq
;
; X64-BMI2-LABEL: scalar_i32_x_is_const_eq:
; X64-BMI2:       # %bb.0:
; X64-BMI2-NEXT:    movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-BMI2-NEXT:    shlxl %edi, %eax, %eax
; X64-BMI2-NEXT:    testb $1, %al
; X64-BMI2-NEXT:    sete %al
; X64-BMI2-NEXT:    retq
  %t0 = shl i32 2857740885, %y
  %t1 = and i32 %t0, 1
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}
define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
; X86-LABEL: scalar_i32_x_is_const2_eq:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $-1437226411, %ecx # imm = 0xAA55AA55
; X86-NEXT:    btl %eax, %ecx
; X86-NEXT:    setae %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i32_x_is_const2_eq:
; X64:       # %bb.0:
; X64-NEXT:    movl $-1437226411, %eax # imm = 0xAA55AA55
; X64-NEXT:    btl %edi, %eax
; X64-NEXT:    setae %al
; X64-NEXT:    retq
  %t0 = shl i32 1, %y
  %t1 = and i32 %t0, 2857740885
  %res = icmp eq i32 %t1, 0
  ret i1 %res
}

;------------------------------------------------------------------------------;
; A few negative tests
;------------------------------------------------------------------------------;

define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
; X86-LABEL: negative_scalar_i8_bitsinmiddle_slt:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movb $24, %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
; X86-NEXT:    shrb $7, %al
; X86-NEXT:    retl
;
; X64-LABEL: negative_scalar_i8_bitsinmiddle_slt:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movb $24, %al
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %al
; X64-NEXT:    andb %dil, %al
; X64-NEXT:    shrb $7, %al
; X64-NEXT:    retq
  %t0 = shl i8 24, %y
  %t1 = and i8 %t0, %x
  %res = icmp slt i8 %t1, 0
  ret i1 %res
}

define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
; X86-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movb $-128, %al
; X86-NEXT:    shlb %cl, %al
; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
; X86-NEXT:    cmpb $1, %al
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: scalar_i8_signbit_eq_with_nonzero:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %ecx
; X64-NEXT:    movb $-128, %al
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shlb %cl, %al
; X64-NEXT:    andb %dil, %al
; X64-NEXT:    cmpb $1, %al
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %t0 = shl i8 128, %y
  %t1 = and i8 %t0, %x
  %res = icmp eq i8 %t1, 1 ; should be comparing with 0
  ret i1 %res
}