; xref: /llvm-project/llvm/test/CodeGen/X86/xor-with-overflow.ll (revision f0dd12ec5c0169ba5b4363b62d59511181cf954a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefix=X64

;
; PR48768 - 'xor' clears the overflow flag, so we don't need a separate 'test'.
;
; i8 reg/imm: return %0 ^ -17, unless that result is zero, in which case keep %0.
define i8 @xor_i8_ri(i8 zeroext %0, i8 zeroext %1) {
; X86-LABEL: xor_i8_ri:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    xorb $-17, %cl
; X86-NEXT:    je .LBB0_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB0_2:
; X86-NEXT:    retl
;
; X64-LABEL: xor_i8_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorb $-17, %al
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %3 = xor i8 %0, -17
  %4 = icmp eq i8 %3, 0
  %5 = select i1 %4, i8 %0, i8 %3
  ret i8 %5
}

; i8 reg/reg: return %1 ^ %0, unless that result is zero, in which case keep %0.
define i8 @xor_i8_rr(i8 zeroext %0, i8 zeroext %1) {
; X86-LABEL: xor_i8_rr:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorb %al, %cl
; X86-NEXT:    je .LBB1_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB1_2:
; X86-NEXT:    retl
;
; X64-LABEL: xor_i8_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    xorl %edi, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %3 = xor i8 %1, %0
  %4 = icmp eq i8 %3, 0
  %5 = select i1 %4, i8 %0, i8 %3
  ret i8 %5
}

; i16 reg/imm: return %0 ^ -17, unless that result is zero, in which case keep %0.
define i16 @xor_i16_ri(i16 zeroext %0, i16 zeroext %1) {
; X86-LABEL: xor_i16_ri:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    xorl $65519, %ecx # imm = 0xFFEF
; X86-NEXT:    testw %cx, %cx
; X86-NEXT:    je .LBB2_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB2_2:
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: xor_i16_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorl $65519, %eax # imm = 0xFFEF
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %3 = xor i16 %0, -17
  %4 = icmp eq i16 %3, 0
  %5 = select i1 %4, i16 %0, i16 %3
  ret i16 %5
}

; i16 reg/reg: return %1 ^ %0, unless that result is zero, in which case keep %0.
define i16 @xor_i16_rr(i16 zeroext %0, i16 zeroext %1) {
; X86-LABEL: xor_i16_rr:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorw %ax, %cx
; X86-NEXT:    je .LBB3_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB3_2:
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: xor_i16_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    xorl %edi, %eax
; X64-NEXT:    cmovel %edi, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %3 = xor i16 %1, %0
  %4 = icmp eq i16 %3, 0
  %5 = select i1 %4, i16 %0, i16 %3
  ret i16 %5
}

; i32 reg/imm: signed compare — return %0 ^ -17, unless that result is < 1, in which case keep %0.
define i32 @xor_i32_ri(i32 %0, i32 %1) {
; X86-LABEL: xor_i32_ri:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    xorl $-17, %ecx
; X86-NEXT:    jle .LBB4_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB4_2:
; X86-NEXT:    retl
;
; X64-LABEL: xor_i32_ri:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorl $-17, %eax
; X64-NEXT:    cmovlel %edi, %eax
; X64-NEXT:    retq
  %3 = xor i32 %0, -17
  %4 = icmp slt i32 %3, 1
  %5 = select i1 %4, i32 %0, i32 %3
  ret i32 %5
}

; i32 reg/reg: signed compare — return %1 ^ %0, unless that result is < 1, in which case keep %0.
define i32 @xor_i32_rr(i32 %0, i32 %1) {
; X86-LABEL: xor_i32_rr:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %eax, %ecx
; X86-NEXT:    jle .LBB5_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:  .LBB5_2:
; X86-NEXT:    retl
;
; X64-LABEL: xor_i32_rr:
; X64:       # %bb.0:
; X64-NEXT:    movl %esi, %eax
; X64-NEXT:    xorl %edi, %eax
; X64-NEXT:    cmovlel %edi, %eax
; X64-NEXT:    retq
  %3 = xor i32 %1, %0
  %4 = icmp slt i32 %3, 1
  %5 = select i1 %4, i32 %0, i32 %3
  ret i32 %5
}

; i64 reg/imm: signed compare — return %0 ^ -17, unless that result is < 1, in which case keep %0.
; On i686 the i64 compare is split into a 32-bit cmp/sbb pair.
define i64 @xor_i64_ri(i64 %0, i64 %1) nounwind {
; X86-LABEL: xor_i64_ri:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    notl %ecx
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    xorl $-17, %esi
; X86-NEXT:    cmpl $1, %esi
; X86-NEXT:    movl %ecx, %edi
; X86-NEXT:    sbbl $0, %edi
; X86-NEXT:    jl .LBB6_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:  .LBB6_2:
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
;
; X64-LABEL: xor_i64_ri:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    xorq $-17, %rax
; X64-NEXT:    cmovleq %rdi, %rax
; X64-NEXT:    retq
  %3 = xor i64 %0, -17
  %4 = icmp slt i64 %3, 1
  %5 = select i1 %4, i64 %0, i64 %3
  ret i64 %5
}

; i64 reg/reg: signed compare — return %1 ^ %0, unless that result is < 1, in which case keep %0.
; On i686 the i64 compare is split into a 32-bit cmp/sbb pair.
define i64 @xor_i64_rr(i64 %0, i64 %1) nounwind {
; X86-LABEL: xor_i64_rr:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %edx, %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    xorl %eax, %esi
; X86-NEXT:    cmpl $1, %esi
; X86-NEXT:    movl %ecx, %edi
; X86-NEXT:    sbbl $0, %edi
; X86-NEXT:    jl .LBB7_2
; X86-NEXT:  # %bb.1:
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:  .LBB7_2:
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
;
; X64-LABEL: xor_i64_rr:
; X64:       # %bb.0:
; X64-NEXT:    movq %rsi, %rax
; X64-NEXT:    xorq %rdi, %rax
; X64-NEXT:    cmovleq %rdi, %rax
; X64-NEXT:    retq
  %3 = xor i64 %1, %0
  %4 = icmp slt i64 %3, 1
  %5 = select i1 %4, i64 %0, i64 %3
  ret i64 %5
}
