; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown   | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
; rdar://7367229

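; The xor of two bit tests of the same bit should fold into a single bit
; test of the xor'ed values, as the checks below expect: testl $16384 of
; a ^ b on x86-64, and testb $64 of the containing byte on i686.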
define i32 @t(i32 %a, i32 %b) nounwind ssp {
; X86-LABEL: t:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorb {{[0-9]+}}(%esp), %al
; X86-NEXT:    testb $64, %al
; X86-NEXT:    jne bar # TAILCALL
; X86-NEXT:  # %bb.1: # %bb
; X86-NEXT:    jmp foo # TAILCALL
;
; X64-LABEL: t:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %esi, %edi
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testl $16384, %edi # imm = 0x4000
; X64-NEXT:    jne bar # TAILCALL
; X64-NEXT:  # %bb.1: # %bb
; X64-NEXT:    jmp foo # TAILCALL
entry:
  %0 = and i32 %a, 16384
  %1 = icmp ne i32 %0, 0
  %2 = and i32 %b, 16384
  %3 = icmp ne i32 %2, 0
  %4 = xor i1 %1, %3
  br i1 %4, label %bb1, label %bb

bb:                                               ; preds = %entry
  %5 = tail call i32 (...) @foo() nounwind       ; <i32> [#uses=1]
  ret i32 %5

bb1:                                              ; preds = %entry
  %6 = tail call i32 (...) @bar() nounwind       ; <i32> [#uses=1]
  ret i32 %6
}

declare dso_local i32 @foo(...)

declare dso_local i32 @bar(...)

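; With i1 setcc values there is no mask to fold away, so the compares are
; materialized with sete and the results compared directly with cmpb.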
define i32 @t2(i32 %x, i32 %y) nounwind ssp {
; X86-LABEL: t2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    sete %al
; X86-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT:    sete %cl
; X86-NEXT:    cmpb %al, %cl
; X86-NEXT:    jne foo # TAILCALL
; X86-NEXT:  # %bb.1: # %return
; X86-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    testl %edi, %edi
; X64-NEXT:    sete %al
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    sete %cl
; X64-NEXT:    cmpb %al, %cl
; X64-NEXT:    je .LBB1_1
; X64-NEXT:  # %bb.2: # %bb
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    jmp foo # TAILCALL
; X64-NEXT:  .LBB1_1: # %return
; X64-NEXT:    retq
entry:
  %0 = icmp eq i32 %x, 0                          ; <i1> [#uses=1]
  %1 = icmp eq i32 %y, 0                          ; <i1> [#uses=1]
  %2 = xor i1 %1, %0                              ; <i1> [#uses=1]
  br i1 %2, label %bb, label %return

bb:                                               ; preds = %entry
  %3 = tail call i32 (...) @foo() nounwind       ; <i32> [#uses=0]
  ret i32 undef

return:                                           ; preds = %entry
  ret i32 undef
}

; PR45703
; https://bugs.llvm.org/show_bug.cgi?id=45703

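; xor of two zeroext bool arguments plus a 'not' should lower to a plain
; byte xor followed by an xor with 1, with no setcc or branch.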
define i1 @xor_not_bools(i1 zeroext %x, i1 zeroext %y) nounwind {
; X86-LABEL: xor_not_bools:
; X86:       # %bb.0:
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorb {{[0-9]+}}(%esp), %al
; X86-NEXT:    xorb $1, %al
; X86-NEXT:    retl
;
; X64-LABEL: xor_not_bools:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    xorl %esi, %eax
; X64-NEXT:    xorb $1, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %xor = xor i1 %x, %y
  %not = xor i1 %xor, true
  ret i1 %not
}

; This is probably not canonical IR; just testing another possible pattern.

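; The compares are materialized with setne/sete; the trailing 'not' stays
; as an explicit xorb $1 rather than being folded into either compare.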
define zeroext i1 @xor_not_cmps(i32 %x, i32 %y) nounwind {
; X86-LABEL: xor_not_cmps:
; X86:       # %bb.0:
; X86-NEXT:    cmpl $42, {{[0-9]+}}(%esp)
; X86-NEXT:    setne %cl
; X86-NEXT:    cmpl $235, {{[0-9]+}}(%esp)
; X86-NEXT:    sete %al
; X86-NEXT:    xorb %cl, %al
; X86-NEXT:    xorb $1, %al
; X86-NEXT:    retl
;
; X64-LABEL: xor_not_cmps:
; X64:       # %bb.0:
; X64-NEXT:    cmpl $42, %edi
; X64-NEXT:    setne %cl
; X64-NEXT:    cmpl $235, %esi
; X64-NEXT:    sete %al
; X64-NEXT:    xorb %cl, %al
; X64-NEXT:    xorb $1, %al
; X64-NEXT:    retq
  %cmpx = icmp ne i32 %x, 42
  %cmpy = icmp eq i32 %y, 235
  %xor = xor i1 %cmpx, %cmpy
  %not = xor i1 %xor, 1
  ret i1 %not
}

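; The extra use of the xor result (stored through %p) forces it to be
; materialized before the final 'not' is applied with xorb $1.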
define zeroext i1 @xor_not_cmps_extra_use(i32 %x, i32 %y, ptr %p) nounwind {
; X86-LABEL: xor_not_cmps_extra_use:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmpl $42, {{[0-9]+}}(%esp)
; X86-NEXT:    setne %dl
; X86-NEXT:    cmpl $235, {{[0-9]+}}(%esp)
; X86-NEXT:    sete %al
; X86-NEXT:    xorb %dl, %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    xorb $1, %al
; X86-NEXT:    retl
;
; X64-LABEL: xor_not_cmps_extra_use:
; X64:       # %bb.0:
; X64-NEXT:    cmpl $42, %edi
; X64-NEXT:    setne %al
; X64-NEXT:    cmpl $235, %esi
; X64-NEXT:    sete %cl
; X64-NEXT:    xorb %al, %cl
; X64-NEXT:    movzbl %cl, %eax
; X64-NEXT:    movl %eax, (%rdx)
; X64-NEXT:    xorb $1, %al
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    retq
  %cmpx = icmp ne i32 %x, 42
  %cmpy = icmp eq i32 %y, 235
  %xor = xor i1 %cmpx, %cmpy
  %z = zext i1 %xor to i32
  store i32 %z, ptr %p
  %not = xor i1 %xor, 1
  ret i1 %not
}