; xref: /llvm-project/llvm/test/CodeGen/X86/and-with-overflow.ll (revision f0dd12ec5c0169ba5b4363b62d59511181cf954a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefix=X64

;
; PR48768 - 'and' clears the overflow flag, so we don't need a separate 'test'.
;
; i8 reg/imm: returns %0 when (%0 & -17) is zero, else the masked value,
; so the select's flag comes straight from the 'and'.
define i8 @and_i8_ri(i8 zeroext %0, i8 zeroext %1) {
; X86-LABEL: and_i8_ri:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    andb $-17, %cl
; X86-NEXT:    je .LBB0_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB0_2:
; X86-NEXT:    retl
;
; X64-LABEL: and_i8_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andb $-17, %al
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %3 = and i8 %0, -17
  %4 = icmp eq i8 %3, 0
  %5 = select i1 %4, i8 %0, i8 %3
  ret i8 %5
}

; i8 reg/reg: returns %0 when (%1 & %0) is zero, else the masked value.
define i8 @and_i8_rr(i8 zeroext %0, i8 zeroext %1) {
; X86-LABEL: and_i8_rr:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andb %al, %cl
; X86-NEXT:    je .LBB1_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB1_2:
; X86-NEXT:    retl
;
; X64-LABEL: and_i8_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %3 = and i8 %1, %0
  %4 = icmp eq i8 %3, 0
  %5 = select i1 %4, i8 %0, i8 %3
  ret i8 %5
}

; i16 reg/imm: returns %0 when (%0 & -17) is zero, else the masked value.
define i16 @and_i16_ri(i16 zeroext %0, i16 zeroext %1) {
; X86-LABEL: and_i16_ri:
; X86:       # %bb.0:
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    andl $-17, %ecx
; X86-NEXT:    je .LBB2_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB2_2:
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: and_i16_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $-17, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %3 = and i16 %0, -17
  %4 = icmp eq i16 %3, 0
  %5 = select i1 %4, i16 %0, i16 %3
  ret i16 %5
}

; i16 reg/reg: returns %0 when (%1 & %0) is zero, else the masked value.
define i16 @and_i16_rr(i16 zeroext %0, i16 zeroext %1) {
; X86-LABEL: and_i16_rr:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andw %ax, %cx
; X86-NEXT:    je .LBB3_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB3_2:
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: and_i16_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %3 = and i16 %1, %0
  %4 = icmp eq i16 %3, 0
  %5 = select i1 %4, i16 %0, i16 %3
  ret i16 %5
}

; i32 reg/imm: signed compare — returns %0 when (%0 & -17) < 1, else the
; masked value, so the sign/zero flags of the 'and' feed the select.
define i32 @and_i32_ri(i32 %0, i32 %1) {
; X86-LABEL: and_i32_ri:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    andl $-17, %ecx
; X86-NEXT:    jle .LBB4_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB4_2:
; X86-NEXT:    retl
;
; X64-LABEL: and_i32_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    andl $-17, %eax
; X64-NEXT:    cmovlel %edi, %eax
; X64-NEXT:    retq
  %3 = and i32 %0, -17
  %4 = icmp slt i32 %3, 1
  %5 = select i1 %4, i32 %0, i32 %3
  ret i32 %5
}

; i32 reg/reg: signed compare — returns %0 when (%1 & %0) < 1, else the
; masked value.
define i32 @and_i32_rr(i32 %0, i32 %1) {
; X86-LABEL: and_i32_rr:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl %eax, %ecx
; X86-NEXT:    jle .LBB5_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB5_2:
; X86-NEXT:    retl
;
; X64-LABEL: and_i32_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    andl %edi, %eax
; X64-NEXT:    cmovlel %edi, %eax
; X64-NEXT:    retq
  %3 = and i32 %1, %0
  %4 = icmp slt i32 %3, 1
  %5 = select i1 %4, i32 %0, i32 %3
  ret i32 %5
}

; i64 reg/imm: signed compare — returns %0 when (%0 & -17) < 1, else the
; masked value. The 32-bit target needs a two-word cmp/sbb sequence.
define i64 @and_i64_ri(i64 %0, i64 %1) nounwind {
; X86-LABEL: and_i64_ri:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    andl $-17, %ecx
; X86-NEXT:    cmpl $1, %ecx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    sbbl $0, %esi
; X86-NEXT:    jl .LBB6_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB6_2:
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
;
; X64-LABEL: and_i64_ri:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    andq $-17, %rax
; X64-NEXT:    cmovleq %rdi, %rax
; X64-NEXT:    retq
  %3 = and i64 %0, -17
  %4 = icmp slt i64 %3, 1
  %5 = select i1 %4, i64 %0, i64 %3
  ret i64 %5
}

; i64 reg/reg: signed compare — returns %0 when (%1 & %0) < 1, else the
; masked value. On the 32-bit target both halves are and'ed and compared.
define i64 @and_i64_rr(i64 %0, i64 %1) nounwind {
; X86-LABEL: and_i64_rr:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl %edx, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    andl %eax, %esi
; X86-NEXT:    cmpl $1, %esi
; X86-NEXT:    movl %ecx, %edi
; X86-NEXT:    sbbl $0, %edi
; X86-NEXT:    jl .LBB7_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:  .LBB7_2:
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
;
; X64-LABEL: and_i64_rr:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    andq %rdi, %rax
; X64-NEXT:    cmovleq %rdi, %rax
; X64-NEXT:    retq
  %3 = and i64 %1, %0
  %4 = icmp slt i64 %3, 1
  %5 = select i1 %4, i64 %0, i64 %3
  ret i64 %5
}
