; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,NOBMI
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,BMI

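; The and/not/or chain below should lower to three ANDN instructions under
; BMI; without BMI it stays as explicit NOT+AND/OR.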
define i64 @test_i64(i64 %w, i64 %x, i64 %y, i64 %z) {
; NOBMI-LABEL: test_i64:
; NOBMI:       # %bb.0: # %Entry
; NOBMI-NEXT:    movq %rcx, %rax
; NOBMI-NEXT:    andq %rdx, %rsi
; NOBMI-NEXT:    notq %rsi
; NOBMI-NEXT:    andq %rdi, %rsi
; NOBMI-NEXT:    notq %rax
; NOBMI-NEXT:    orq %rdx, %rax
; NOBMI-NEXT:    andq %rsi, %rax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: test_i64:
; BMI:       # %bb.0: # %Entry
; BMI-NEXT:    andq %rdx, %rsi
; BMI-NEXT:    andnq %rdi, %rsi, %rax
; BMI-NEXT:    andnq %rcx, %rdx, %rcx
; BMI-NEXT:    andnq %rax, %rcx, %rax
; BMI-NEXT:    retq
Entry:
  %and1 = and i64 %y, %x
  %xor1 = xor i64 %and1, -1
  %and2 = and i64 %xor1, %w
  %.not = xor i64 %z, -1
  %or1 = or i64 %.not, %y
  %and3 = and i64 %and2, %or1
  ret i64 %and3
}

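; Same pattern at i32: BMI selects the 32-bit ANDNL form.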
define i32 @test_i32(i32 %w, i32 %x, i32 %y, i32 %z) {
; NOBMI-LABEL: test_i32:
; NOBMI:       # %bb.0: # %Entry
; NOBMI-NEXT:    movl %ecx, %eax
; NOBMI-NEXT:    andl %edx, %esi
; NOBMI-NEXT:    notl %esi
; NOBMI-NEXT:    andl %edi, %esi
; NOBMI-NEXT:    notl %eax
; NOBMI-NEXT:    orl %edx, %eax
; NOBMI-NEXT:    andl %esi, %eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: test_i32:
; BMI:       # %bb.0: # %Entry
; BMI-NEXT:    andl %edx, %esi
; BMI-NEXT:    andnl %edi, %esi, %eax
; BMI-NEXT:    andnl %ecx, %edx, %ecx
; BMI-NEXT:    andnl %eax, %ecx, %eax
; BMI-NEXT:    retq
Entry:
  %and1 = and i32 %y, %x
  %xor1 = xor i32 %and1, -1
  %and2 = and i32 %xor1, %w
  %.not = xor i32 %z, -1
  %or1 = or i32 %.not, %y
  %and3 = and i32 %and2, %or1
  ret i32 %and3
}

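; i16: ANDN only has 32/64-bit forms, so after promotion only the first NOT
; becomes ANDNL; the (~z | y) half is kept as NOT+OR.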
define i16 @test_i16(i16 %w, i16 %x, i16 %y, i16 %z) {
; NOBMI-LABEL: test_i16:
; NOBMI:       # %bb.0: # %Entry
; NOBMI-NEXT:    movl %ecx, %eax
; NOBMI-NEXT:    andl %edx, %esi
; NOBMI-NEXT:    notl %esi
; NOBMI-NEXT:    andl %edi, %esi
; NOBMI-NEXT:    notl %eax
; NOBMI-NEXT:    orl %edx, %eax
; NOBMI-NEXT:    andl %esi, %eax
; NOBMI-NEXT:    # kill: def $ax killed $ax killed $eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: test_i16:
; BMI:       # %bb.0: # %Entry
; BMI-NEXT:    andl %edx, %esi
; BMI-NEXT:    andnl %edi, %esi, %eax
; BMI-NEXT:    notl %ecx
; BMI-NEXT:    orl %edx, %ecx
; BMI-NEXT:    andl %ecx, %eax
; BMI-NEXT:    # kill: def $ax killed $ax killed $eax
; BMI-NEXT:    retq
Entry:
  %and1 = and i16 %y, %x
  %xor1 = xor i16 %and1, -1
  %and2 = and i16 %xor1, %w
  %.not = xor i16 %z, -1
  %or1 = or i16 %.not, %y
  %and3 = and i16 %and2, %or1
  ret i16 %and3
}

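; i8: there is no byte ANDN, so BMI and non-BMI targets produce identical
; code (shared CHECK lines).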
define i8 @test_i8(i8 %w, i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: test_i8:
; CHECK:       # %bb.0: # %Entry
; CHECK-NEXT:    movl %edx, %eax
; CHECK-NEXT:    andl %edx, %esi
; CHECK-NEXT:    notb %sil
; CHECK-NEXT:    andb %dil, %sil
; CHECK-NEXT:    notb %cl
; CHECK-NEXT:    orb %cl, %al
; CHECK-NEXT:    andb %sil, %al
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    retq
Entry:
  %and1 = and i8 %y, %x
  %xor1 = xor i8 %and1, -1
  %and2 = and i8 %xor1, %w
  %.not = xor i8 %z, -1
  %or1 = or i8 %.not, %y
  %and3 = and i8 %and2, %or1
  ret i8 %and3
}

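; The same fold applies to 128-bit vectors via ANDNPS (VANDNPS with AVX).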
define <16 x i8> @test_v16i8(<16 x i8> %w, <16 x i8> %x, <16 x i8> %y, <16 x i8> %z) {
; NOBMI-LABEL: test_v16i8:
; NOBMI:       # %bb.0: # %Entry
; NOBMI-NEXT:    andps %xmm2, %xmm1
; NOBMI-NEXT:    andnps %xmm0, %xmm1
; NOBMI-NEXT:    andnps %xmm3, %xmm2
; NOBMI-NEXT:    andnps %xmm1, %xmm2
; NOBMI-NEXT:    movaps %xmm2, %xmm0
; NOBMI-NEXT:    retq
;
; BMI-LABEL: test_v16i8:
; BMI:       # %bb.0: # %Entry
; BMI-NEXT:    vandps %xmm1, %xmm2, %xmm1
; BMI-NEXT:    vandnps %xmm0, %xmm1, %xmm0
; BMI-NEXT:    vandnps %xmm3, %xmm2, %xmm1
; BMI-NEXT:    vandnps %xmm0, %xmm1, %xmm0
; BMI-NEXT:    retq
Entry:
  %and1 = and <16 x i8> %y, %x
  %xor1 = xor <16 x i8> %and1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %and2 = and <16 x i8> %xor1, %w
  %.not = xor <16 x i8> %z, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %or1 = or <16 x i8> %.not, %y
  %and3 = and <16 x i8> %and2, %or1
  ret <16 x i8> %and3
}

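; 256-bit vectors: x86-64-v3 (AVX2) handles this in YMM registers, while the
; SSE-only target splits into XMM halves.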
define <32 x i8> @test_v32i8(<32 x i8> %w, <32 x i8> %x, <32 x i8> %y, <32 x i8> %z) {
; NOBMI-LABEL: test_v32i8:
; NOBMI:       # %bb.0: # %Entry
; NOBMI-NEXT:    andps %xmm4, %xmm2
; NOBMI-NEXT:    andps %xmm5, %xmm3
; NOBMI-NEXT:    andnps %xmm1, %xmm3
; NOBMI-NEXT:    andnps %xmm0, %xmm2
; NOBMI-NEXT:    andnps %xmm6, %xmm4
; NOBMI-NEXT:    andnps %xmm2, %xmm4
; NOBMI-NEXT:    andnps %xmm7, %xmm5
; NOBMI-NEXT:    andnps %xmm3, %xmm5
; NOBMI-NEXT:    movaps %xmm4, %xmm0
; NOBMI-NEXT:    movaps %xmm5, %xmm1
; NOBMI-NEXT:    retq
;
; BMI-LABEL: test_v32i8:
; BMI:       # %bb.0: # %Entry
; BMI-NEXT:    vandps %ymm1, %ymm2, %ymm1
; BMI-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; BMI-NEXT:    vandnps %ymm3, %ymm2, %ymm1
; BMI-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; BMI-NEXT:    retq
Entry:
  %and1 = and <32 x i8> %y, %x
  %xor1 = xor <32 x i8> %and1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %and2 = and <32 x i8> %xor1, %w
  %.not = xor <32 x i8> %z, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %or1 = or <32 x i8> %.not, %y
  %and3 = and <32 x i8> %and2, %or1
  ret <32 x i8> %and3
}

; PR112347 - don't fold if we'd be inverting a constant, as De Morgan normalisation will invert it back again.
define void @PR112347(ptr %p0, ptr %p1, ptr %p2) {
; CHECK-LABEL: PR112347:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rdi), %eax
; CHECK-NEXT:    notl %eax
; CHECK-NEXT:    orl $-16777204, %eax # imm = 0xFF00000C
; CHECK-NEXT:    andl (%rsi), %eax
; CHECK-NEXT:    movl %eax, (%rdx)
; CHECK-NEXT:    retq
  %load0 = load i32, ptr %p0, align 1
  %load1 = load i32, ptr %p1, align 4
  %not = xor i32 %load0, -1
  %top = or i32 %not, -16777204
  %mask = and i32 %load1, %top
  store i32 %mask, ptr %p2, align 4
  ret void
}

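; PR113240 - reduced reproducer with a constant mask; note that both targets
; generate the same ANDN-free code.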
define void @PR113240(i64 %a) {
; CHECK-LABEL: PR113240:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    notq %rax
; CHECK-NEXT:    movabsq $8796093022206, %rcx # imm = 0x7FFFFFFFFFE
; CHECK-NEXT:    notq %rcx
; CHECK-NEXT:    orq %rax, %rcx
; CHECK-NEXT:    andq %rdi, %rcx
; CHECK-NEXT:    movq %rcx, 0
; CHECK-NEXT:    retq
entry:
  %and = and i64 %a, 8796093022206
  %bf.value = and i64 8796093022206, 0
  %not = xor i64 %and, -1
  %and4 = and i64 %a, %not
  store i64 %and4, ptr null, align 8
  ret void
}