; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

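; Flipping the sign bit with xor is equivalent to adding it, since the carry
; out of the top bit is discarded: x ^ 0x80000000 == x + 0x80000000 (mod 2^32).
; When the xor result is unused, a memory-destination `lock xor` suffices;
; when the result is used, the add form allows `lock xadd`, which returns the
; old value without needing a cmpxchg loop.

; Result unused: a single memory-destination `lock xorl` is enough.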
define void @xor32_signbit_unused(ptr %p) nounwind {
; X86-LABEL: xor32_signbit_unused:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    lock xorl $-2147483648, (%eax) # imm = 0x80000000
; X86-NEXT:    retl
;
; X64-LABEL: xor32_signbit_unused:
; X64:       # %bb.0:
; X64-NEXT:    lock xorl $-2147483648, (%rdi) # imm = 0x80000000
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
  ret void
}

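; i128 is wider than any native lock-prefixed RMW instruction at these
; triples, so the operation becomes a libcall: a __atomic_compare_exchange
; loop on i686 and __atomic_fetch_xor_16 on x86-64. Note that the i686 loop
; still uses the rewrite, adding the sign bit into the high dword (addl)
; instead of xoring it.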
define i128 @xor128_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor128_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    andl $-16, %esp
; X86-NEXT:    subl $48, %esp
; X86-NEXT:    movl 12(%ebp), %edi
; X86-NEXT:    movl 12(%edi), %ecx
; X86-NEXT:    movl 8(%edi), %edx
; X86-NEXT:    movl (%edi), %ebx
; X86-NEXT:    movl 4(%edi), %esi
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB1_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %ebx, (%esp)
; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT:    addl $-2147483648, %ecx # imm = 0x80000000
; X86-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-NEXT:    pushl $0
; X86-NEXT:    pushl $0
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl %eax
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl $16
; X86-NEXT:    calll __atomic_compare_exchange@PLT
; X86-NEXT:    addl $24, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl (%esp), %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    testb %al, %al
; X86-NEXT:    je .LBB1_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    movl %ebx, (%eax)
; X86-NEXT:    movl %esi, 4(%eax)
; X86-NEXT:    movl %edx, 8(%eax)
; X86-NEXT:    movl %ecx, 12(%eax)
; X86-NEXT:    leal -12(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
;
; X64-LABEL: xor128_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    movabsq $-9223372036854775808, %rdx # imm = 0x8000000000000000
; X64-NEXT:    xorl %esi, %esi
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    callq __atomic_fetch_xor_16@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i128 170141183460469231731687303715884105728 monotonic
  ret i128 %r
}

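; i64 result used: x86-64 lowers to lock xaddq; i686 has no 64-bit xadd, so
; it uses a cmpxchg8b loop, again adding the sign bit into the high half
; (leal) rather than xoring it.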
define i64 @xor64_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor64_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl (%esi), %eax
; X86-NEXT:    movl 4(%esi), %edx
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB2_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    leal -2147483648(%edx), %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b (%esi)
; X86-NEXT:    jne .LBB2_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: xor64_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; X64-NEXT:    lock xaddq %rax, (%rdi)
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i64 9223372036854775808 monotonic
  ret i64 %r
}

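; i32 result used: xor of the sign bit becomes lock xaddl on both targets.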
define i32 @xor32_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor32_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    lock xaddl %eax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: xor32_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT:    lock xaddl %eax, (%rdi)
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
  ret i32 %r
}

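; i16 result used: same rewrite, using lock xaddw.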
define i16 @xor16_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor16_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movw $-32768, %ax # imm = 0x8000
; X86-NEXT:    lock xaddw %ax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: xor16_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    movw $-32768, %ax # imm = 0x8000
; X64-NEXT:    lock xaddw %ax, (%rdi)
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i16 32768 monotonic
  ret i16 %r
}

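; i8 result used: same rewrite, using lock xaddb.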
define i8 @xor8_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor8_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movb $-128, %al
; X86-NEXT:    lock xaddb %al, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: xor8_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    movb $-128, %al
; X64-NEXT:    lock xaddb %al, (%rdi)
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i8 128 monotonic
  ret i8 %r
}

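; Negative test: 0x80000001 flips more than the sign bit, so xor is not
; equivalent to add and a cmpxchgl loop is still required.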
define i32 @xor32_not_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor32_not_signbit_used:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl (%ecx), %eax
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB6_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    xorl $-2147483647, %edx # imm = 0x80000001
; X86-NEXT:    lock cmpxchgl %edx, (%ecx)
; X86-NEXT:    jne .LBB6_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    retl
;
; X64-LABEL: xor32_not_signbit_used:
; X64:       # %bb.0:
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    .p2align 4
; X64-NEXT:  .LBB6_1: # %atomicrmw.start
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    xorl $-2147483647, %ecx # imm = 0x80000001
; X64-NEXT:    lock cmpxchgl %ecx, (%rdi)
; X64-NEXT:    jne .LBB6_1
; X64-NEXT:  # %bb.2: # %atomicrmw.end
; X64-NEXT:    retq
  %r = atomicrmw xor ptr %p, i32 2147483649 monotonic
  ret i32 %r
}
199