xref: /llvm-project/llvm/test/CodeGen/X86/atomic64.ll (revision fa46f1ac3b02bcd786bcaa947f8c4f14ea652f7a)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -O0 -mtriple=x86_64-- -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
3; RUN: llc < %s -O0 -mtriple=i386-- -mcpu=i486 -verify-machineinstrs | FileCheck %s --check-prefix I486
4
; External 64-bit globals used as the atomic targets throughout this test:
; an i64, a double, and a pointer (each 8 bytes on x86-64; 8-byte libcalls on i386).
5@sc64 = external dso_local global i64
6@fsc64 = external dso_local global double
7@psc64 = external dso_local global ptr
8
; atomicrmw add on a 64-bit global with constant (1, 3, 5) and variable operands.
; X64 selects lock inc/add/xadd since the results are (mostly) unused; I486 has no
; 8-byte atomics, so each op becomes a __atomic_fetch_add_8 libcall whose last
; argument ($2) is the memory order, matching the IR's acquire ordering.
9define void @atomic_fetch_add64() nounwind {
10; X64-LABEL: atomic_fetch_add64:
11; X64:       # %bb.0: # %entry
12; X64-NEXT:    lock incq sc64(%rip)
13; X64-NEXT:    lock addq $3, sc64(%rip)
14; X64-NEXT:    movl $5, %eax
15; X64-NEXT:    lock xaddq %rax, sc64(%rip)
16; X64-NEXT:    lock addq %rax, sc64(%rip)
17; X64-NEXT:    retq
18;
19; I486-LABEL: atomic_fetch_add64:
20; I486:       # %bb.0: # %entry
21; I486-NEXT:    subl $16, %esp
22; I486-NEXT:    movl %esp, %eax
23; I486-NEXT:    movl $2, 12(%eax)
24; I486-NEXT:    movl $0, 8(%eax)
25; I486-NEXT:    movl $1, 4(%eax)
26; I486-NEXT:    movl $sc64, (%eax)
27; I486-NEXT:    calll __atomic_fetch_add_8@PLT
28; I486-NEXT:    movl %esp, %eax
29; I486-NEXT:    movl $2, 12(%eax)
30; I486-NEXT:    movl $0, 8(%eax)
31; I486-NEXT:    movl $3, 4(%eax)
32; I486-NEXT:    movl $sc64, (%eax)
33; I486-NEXT:    calll __atomic_fetch_add_8@PLT
34; I486-NEXT:    movl %esp, %eax
35; I486-NEXT:    movl $2, 12(%eax)
36; I486-NEXT:    movl $0, 8(%eax)
37; I486-NEXT:    movl $5, 4(%eax)
38; I486-NEXT:    movl $sc64, (%eax)
39; I486-NEXT:    calll __atomic_fetch_add_8@PLT
40; I486-NEXT:    movl %eax, %ecx
41; I486-NEXT:    movl %esp, %eax
42; I486-NEXT:    movl %edx, 8(%eax)
43; I486-NEXT:    movl %ecx, 4(%eax)
44; I486-NEXT:    movl $2, 12(%eax)
45; I486-NEXT:    movl $sc64, (%eax)
46; I486-NEXT:    calll __atomic_fetch_add_8@PLT
47; I486-NEXT:    addl $16, %esp
48; I486-NEXT:    retl
49entry:
50  %t1 = atomicrmw add  ptr @sc64, i64 1 acquire
51  %t2 = atomicrmw add  ptr @sc64, i64 3 acquire
52  %t3 = atomicrmw add  ptr @sc64, i64 5 acquire
53  %t4 = atomicrmw add  ptr @sc64, i64 %t3 acquire
54  ret void
55}
56
; atomicrmw sub, mirroring atomic_fetch_add64: X64 uses lock dec/sub and
; xadd of the negated constant (-5); I486 lowers to __atomic_fetch_sub_8
; libcalls with the acquire ordering argument ($2).
57define void @atomic_fetch_sub64() nounwind {
58; X64-LABEL: atomic_fetch_sub64:
59; X64:       # %bb.0:
60; X64-NEXT:    lock decq sc64(%rip)
61; X64-NEXT:    lock subq $3, sc64(%rip)
62; X64-NEXT:    movq $-5, %rax
63; X64-NEXT:    lock xaddq %rax, sc64(%rip)
64; X64-NEXT:    lock subq %rax, sc64(%rip)
65; X64-NEXT:    retq
66;
67; I486-LABEL: atomic_fetch_sub64:
68; I486:       # %bb.0:
69; I486-NEXT:    subl $16, %esp
70; I486-NEXT:    movl %esp, %eax
71; I486-NEXT:    movl $2, 12(%eax)
72; I486-NEXT:    movl $0, 8(%eax)
73; I486-NEXT:    movl $1, 4(%eax)
74; I486-NEXT:    movl $sc64, (%eax)
75; I486-NEXT:    calll __atomic_fetch_sub_8@PLT
76; I486-NEXT:    movl %esp, %eax
77; I486-NEXT:    movl $2, 12(%eax)
78; I486-NEXT:    movl $0, 8(%eax)
79; I486-NEXT:    movl $3, 4(%eax)
80; I486-NEXT:    movl $sc64, (%eax)
81; I486-NEXT:    calll __atomic_fetch_sub_8@PLT
82; I486-NEXT:    movl %esp, %eax
83; I486-NEXT:    movl $2, 12(%eax)
84; I486-NEXT:    movl $0, 8(%eax)
85; I486-NEXT:    movl $5, 4(%eax)
86; I486-NEXT:    movl $sc64, (%eax)
87; I486-NEXT:    calll __atomic_fetch_sub_8@PLT
88; I486-NEXT:    movl %eax, %ecx
89; I486-NEXT:    movl %esp, %eax
90; I486-NEXT:    movl %edx, 8(%eax)
91; I486-NEXT:    movl %ecx, 4(%eax)
92; I486-NEXT:    movl $2, 12(%eax)
93; I486-NEXT:    movl $sc64, (%eax)
94; I486-NEXT:    calll __atomic_fetch_sub_8@PLT
95; I486-NEXT:    addl $16, %esp
96; I486-NEXT:    retl
97  %t1 = atomicrmw sub  ptr @sc64, i64 1 acquire
98  %t2 = atomicrmw sub  ptr @sc64, i64 3 acquire
99  %t3 = atomicrmw sub  ptr @sc64, i64 5 acquire
100  %t4 = atomicrmw sub  ptr @sc64, i64 %t3 acquire
101  ret void
102}
103
; atomicrmw and: the first (dead-result) op becomes lock andq; the second,
; whose result feeds %t3, is expanded at -O0 into a cmpxchg retry loop
; (.LBB2_1) with spill/reload of the loaded value. I486 again uses
; __atomic_fetch_and_8 libcalls.
104define void @atomic_fetch_and64() nounwind {
105; X64-LABEL: atomic_fetch_and64:
106; X64:       # %bb.0:
107; X64-NEXT:    lock andq $3, sc64(%rip)
108; X64-NEXT:    movq sc64, %rax
109; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
110; X64-NEXT:  .LBB2_1: # %atomicrmw.start
111; X64-NEXT:    # =>This Inner Loop Header: Depth=1
112; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
113; X64-NEXT:    movl %eax, %ecx
114; X64-NEXT:    andl $5, %ecx
115; X64-NEXT:    # kill: def $rcx killed $ecx
116; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
117; X64-NEXT:    sete %cl
118; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
119; X64-NEXT:    testb $1, %cl
120; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
121; X64-NEXT:    jne .LBB2_2
122; X64-NEXT:    jmp .LBB2_1
123; X64-NEXT:  .LBB2_2: # %atomicrmw.end
124; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
125; X64-NEXT:    lock andq %rax, sc64(%rip)
126; X64-NEXT:    retq
127;
128; I486-LABEL: atomic_fetch_and64:
129; I486:       # %bb.0:
130; I486-NEXT:    subl $16, %esp
131; I486-NEXT:    movl %esp, %eax
132; I486-NEXT:    movl $2, 12(%eax)
133; I486-NEXT:    movl $0, 8(%eax)
134; I486-NEXT:    movl $3, 4(%eax)
135; I486-NEXT:    movl $sc64, (%eax)
136; I486-NEXT:    calll __atomic_fetch_and_8@PLT
137; I486-NEXT:    movl %esp, %eax
138; I486-NEXT:    movl $2, 12(%eax)
139; I486-NEXT:    movl $0, 8(%eax)
140; I486-NEXT:    movl $5, 4(%eax)
141; I486-NEXT:    movl $sc64, (%eax)
142; I486-NEXT:    calll __atomic_fetch_and_8@PLT
143; I486-NEXT:    movl %eax, %ecx
144; I486-NEXT:    movl %esp, %eax
145; I486-NEXT:    movl %edx, 8(%eax)
146; I486-NEXT:    movl %ecx, 4(%eax)
147; I486-NEXT:    movl $2, 12(%eax)
148; I486-NEXT:    movl $sc64, (%eax)
149; I486-NEXT:    calll __atomic_fetch_and_8@PLT
150; I486-NEXT:    addl $16, %esp
151; I486-NEXT:    retl
152  %t1 = atomicrmw and  ptr @sc64, i64 3 acquire
153  %t2 = atomicrmw and  ptr @sc64, i64 5 acquire
154  %t3 = atomicrmw and  ptr @sc64, i64 %t2 acquire
155  ret void
156}
157
; atomicrmw or: same shape as atomic_fetch_and64 — lock orq for the dead
; result, a cmpxchg retry loop (.LBB3_1) for the used result, and
; __atomic_fetch_or_8 libcalls on I486.
158define void @atomic_fetch_or64() nounwind {
159; X64-LABEL: atomic_fetch_or64:
160; X64:       # %bb.0:
161; X64-NEXT:    lock orq $3, sc64(%rip)
162; X64-NEXT:    movq sc64, %rax
163; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
164; X64-NEXT:  .LBB3_1: # %atomicrmw.start
165; X64-NEXT:    # =>This Inner Loop Header: Depth=1
166; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
167; X64-NEXT:    movq %rax, %rcx
168; X64-NEXT:    orq $5, %rcx
169; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
170; X64-NEXT:    sete %cl
171; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
172; X64-NEXT:    testb $1, %cl
173; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
174; X64-NEXT:    jne .LBB3_2
175; X64-NEXT:    jmp .LBB3_1
176; X64-NEXT:  .LBB3_2: # %atomicrmw.end
177; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
178; X64-NEXT:    lock orq %rax, sc64(%rip)
179; X64-NEXT:    retq
180;
181; I486-LABEL: atomic_fetch_or64:
182; I486:       # %bb.0:
183; I486-NEXT:    subl $16, %esp
184; I486-NEXT:    movl %esp, %eax
185; I486-NEXT:    movl $2, 12(%eax)
186; I486-NEXT:    movl $0, 8(%eax)
187; I486-NEXT:    movl $3, 4(%eax)
188; I486-NEXT:    movl $sc64, (%eax)
189; I486-NEXT:    calll __atomic_fetch_or_8@PLT
190; I486-NEXT:    movl %esp, %eax
191; I486-NEXT:    movl $2, 12(%eax)
192; I486-NEXT:    movl $0, 8(%eax)
193; I486-NEXT:    movl $5, 4(%eax)
194; I486-NEXT:    movl $sc64, (%eax)
195; I486-NEXT:    calll __atomic_fetch_or_8@PLT
196; I486-NEXT:    movl %eax, %ecx
197; I486-NEXT:    movl %esp, %eax
198; I486-NEXT:    movl %edx, 8(%eax)
199; I486-NEXT:    movl %ecx, 4(%eax)
200; I486-NEXT:    movl $2, 12(%eax)
201; I486-NEXT:    movl $sc64, (%eax)
202; I486-NEXT:    calll __atomic_fetch_or_8@PLT
203; I486-NEXT:    addl $16, %esp
204; I486-NEXT:    retl
205  %t1 = atomicrmw or   ptr @sc64, i64 3 acquire
206  %t2 = atomicrmw or   ptr @sc64, i64 5 acquire
207  %t3 = atomicrmw or   ptr @sc64, i64 %t2 acquire
208  ret void
209}
210
; atomicrmw xor: same shape as the and/or tests — lock xorq for the dead
; result, a cmpxchg retry loop (.LBB4_1) for the used result, and
; __atomic_fetch_xor_8 libcalls on I486.
211define void @atomic_fetch_xor64() nounwind {
212; X64-LABEL: atomic_fetch_xor64:
213; X64:       # %bb.0:
214; X64-NEXT:    lock xorq $3, sc64(%rip)
215; X64-NEXT:    movq sc64, %rax
216; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
217; X64-NEXT:  .LBB4_1: # %atomicrmw.start
218; X64-NEXT:    # =>This Inner Loop Header: Depth=1
219; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
220; X64-NEXT:    movq %rax, %rcx
221; X64-NEXT:    xorq $5, %rcx
222; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
223; X64-NEXT:    sete %cl
224; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
225; X64-NEXT:    testb $1, %cl
226; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
227; X64-NEXT:    jne .LBB4_2
228; X64-NEXT:    jmp .LBB4_1
229; X64-NEXT:  .LBB4_2: # %atomicrmw.end
230; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
231; X64-NEXT:    lock xorq %rax, sc64(%rip)
232; X64-NEXT:    retq
233;
234; I486-LABEL: atomic_fetch_xor64:
235; I486:       # %bb.0:
236; I486-NEXT:    subl $16, %esp
237; I486-NEXT:    movl %esp, %eax
238; I486-NEXT:    movl $2, 12(%eax)
239; I486-NEXT:    movl $0, 8(%eax)
240; I486-NEXT:    movl $3, 4(%eax)
241; I486-NEXT:    movl $sc64, (%eax)
242; I486-NEXT:    calll __atomic_fetch_xor_8@PLT
243; I486-NEXT:    movl %esp, %eax
244; I486-NEXT:    movl $2, 12(%eax)
245; I486-NEXT:    movl $0, 8(%eax)
246; I486-NEXT:    movl $5, 4(%eax)
247; I486-NEXT:    movl $sc64, (%eax)
248; I486-NEXT:    calll __atomic_fetch_xor_8@PLT
249; I486-NEXT:    movl %eax, %ecx
250; I486-NEXT:    movl %esp, %eax
251; I486-NEXT:    movl %edx, 8(%eax)
252; I486-NEXT:    movl %ecx, 4(%eax)
253; I486-NEXT:    movl $2, 12(%eax)
254; I486-NEXT:    movl $sc64, (%eax)
255; I486-NEXT:    calll __atomic_fetch_xor_8@PLT
256; I486-NEXT:    addl $16, %esp
257; I486-NEXT:    retl
258  %t1 = atomicrmw xor  ptr @sc64, i64 3 acquire
259  %t2 = atomicrmw xor  ptr @sc64, i64 5 acquire
260  %t3 = atomicrmw xor  ptr @sc64, i64 %t2 acquire
261  ret void
262}
263
; atomicrmw nand: x86 has no single nand instruction, so X64 always expands
; to a cmpxchg loop computing not(old & %x); I486 calls __atomic_fetch_nand_8.
264define void @atomic_fetch_nand64(i64 %x) nounwind {
265; X64-LABEL: atomic_fetch_nand64:
266; X64:       # %bb.0:
267; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
268; X64-NEXT:    movq sc64, %rax
269; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
270; X64-NEXT:  .LBB5_1: # %atomicrmw.start
271; X64-NEXT:    # =>This Inner Loop Header: Depth=1
272; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
273; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
274; X64-NEXT:    movq %rax, %rcx
275; X64-NEXT:    andq %rdx, %rcx
276; X64-NEXT:    notq %rcx
277; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
278; X64-NEXT:    sete %cl
279; X64-NEXT:    testb $1, %cl
280; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
281; X64-NEXT:    jne .LBB5_2
282; X64-NEXT:    jmp .LBB5_1
283; X64-NEXT:  .LBB5_2: # %atomicrmw.end
284; X64-NEXT:    retq
285;
286; I486-LABEL: atomic_fetch_nand64:
287; I486:       # %bb.0:
288; I486-NEXT:    subl $16, %esp
289; I486-NEXT:    movl {{[0-9]+}}(%esp), %edx
290; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
291; I486-NEXT:    movl %esp, %eax
292; I486-NEXT:    movl %edx, 8(%eax)
293; I486-NEXT:    movl %ecx, 4(%eax)
294; I486-NEXT:    movl $2, 12(%eax)
295; I486-NEXT:    movl $sc64, (%eax)
296; I486-NEXT:    calll __atomic_fetch_nand_8@PLT
297; I486-NEXT:    addl $16, %esp
298; I486-NEXT:    retl
299  %t1 = atomicrmw nand ptr @sc64, i64 %x acquire
300  ret void
301}
302
; atomicrmw max (signed): X64 expands to a cmpxchg loop selecting the larger
; value with cmovg. I486 has no 8-byte hardware atomics: it compares the two
; halves with subl/sbbl (jl picks %x when old < x) and retries through
; __atomic_compare_exchange_8 until the CAS succeeds.
303define void @atomic_fetch_max64(i64 %x) nounwind {
304; X64-LABEL: atomic_fetch_max64:
305; X64:       # %bb.0:
306; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
307; X64-NEXT:    movq sc64, %rax
308; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
309; X64-NEXT:  .LBB6_1: # %atomicrmw.start
310; X64-NEXT:    # =>This Inner Loop Header: Depth=1
311; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
312; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
313; X64-NEXT:    movq %rax, %rdx
314; X64-NEXT:    subq %rcx, %rdx
315; X64-NEXT:    cmovgq %rax, %rcx
316; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
317; X64-NEXT:    sete %cl
318; X64-NEXT:    testb $1, %cl
319; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
320; X64-NEXT:    jne .LBB6_2
321; X64-NEXT:    jmp .LBB6_1
322; X64-NEXT:  .LBB6_2: # %atomicrmw.end
323; X64-NEXT:    retq
324;
325; I486-LABEL: atomic_fetch_max64:
326; I486:       # %bb.0:
327; I486-NEXT:    pushl %ebp
328; I486-NEXT:    movl %esp, %ebp
329; I486-NEXT:    pushl %esi
330; I486-NEXT:    andl $-8, %esp
331; I486-NEXT:    subl $72, %esp
332; I486-NEXT:    movl 12(%ebp), %eax
333; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
334; I486-NEXT:    movl 8(%ebp), %eax
335; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
336; I486-NEXT:    movl sc64+4, %eax
337; I486-NEXT:    movl sc64, %ecx
338; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
339; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
340; I486-NEXT:    jmp .LBB6_1
341; I486-NEXT:  .LBB6_1: # %atomicrmw.start
342; I486-NEXT:    # =>This Inner Loop Header: Depth=1
343; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
344; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
345; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
346; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
347; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
348; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
349; I486-NEXT:    subl %ecx, %esi
350; I486-NEXT:    sbbl %eax, %edx
351; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
352; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
353; I486-NEXT:    jl .LBB6_4
354; I486-NEXT:  # %bb.3: # %atomicrmw.start
355; I486-NEXT:    # in Loop: Header=BB6_1 Depth=1
356; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
357; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
358; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
359; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
360; I486-NEXT:  .LBB6_4: # %atomicrmw.start
361; I486-NEXT:    # in Loop: Header=BB6_1 Depth=1
362; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
363; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
364; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
365; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
366; I486-NEXT:    movl %esi, {{[0-9]+}}(%esp)
367; I486-NEXT:    movl %eax, {{[0-9]+}}(%esp)
368; I486-NEXT:    movl %esp, %eax
369; I486-NEXT:    movl %edx, 12(%eax)
370; I486-NEXT:    movl %ecx, 8(%eax)
371; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
372; I486-NEXT:    movl %ecx, 4(%eax)
373; I486-NEXT:    movl $2, 20(%eax)
374; I486-NEXT:    movl $2, 16(%eax)
375; I486-NEXT:    movl $sc64, (%eax)
376; I486-NEXT:    calll __atomic_compare_exchange_8@PLT
377; I486-NEXT:    movb %al, %dl
378; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
379; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
380; I486-NEXT:    testb %dl, %dl
381; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
382; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
383; I486-NEXT:    je .LBB6_1
384; I486-NEXT:    jmp .LBB6_2
385; I486-NEXT:  .LBB6_2: # %atomicrmw.end
386; I486-NEXT:    leal -4(%ebp), %esp
387; I486-NEXT:    popl %esi
388; I486-NEXT:    popl %ebp
389; I486-NEXT:    retl
390  %t1 = atomicrmw max  ptr @sc64, i64 %x acquire
391
392  ret void
393}
394
; atomicrmw min (signed): identical structure to atomic_fetch_max64 but with
; the inverted condition — cmovle on X64, jge on I486 — so the smaller value
; wins the CAS loop.
395define void @atomic_fetch_min64(i64 %x) nounwind {
396; X64-LABEL: atomic_fetch_min64:
397; X64:       # %bb.0:
398; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
399; X64-NEXT:    movq sc64, %rax
400; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
401; X64-NEXT:  .LBB7_1: # %atomicrmw.start
402; X64-NEXT:    # =>This Inner Loop Header: Depth=1
403; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
404; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
405; X64-NEXT:    movq %rax, %rdx
406; X64-NEXT:    subq %rcx, %rdx
407; X64-NEXT:    cmovleq %rax, %rcx
408; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
409; X64-NEXT:    sete %cl
410; X64-NEXT:    testb $1, %cl
411; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
412; X64-NEXT:    jne .LBB7_2
413; X64-NEXT:    jmp .LBB7_1
414; X64-NEXT:  .LBB7_2: # %atomicrmw.end
415; X64-NEXT:    retq
416;
417; I486-LABEL: atomic_fetch_min64:
418; I486:       # %bb.0:
419; I486-NEXT:    pushl %ebp
420; I486-NEXT:    movl %esp, %ebp
421; I486-NEXT:    pushl %esi
422; I486-NEXT:    andl $-8, %esp
423; I486-NEXT:    subl $72, %esp
424; I486-NEXT:    movl 12(%ebp), %eax
425; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
426; I486-NEXT:    movl 8(%ebp), %eax
427; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
428; I486-NEXT:    movl sc64+4, %eax
429; I486-NEXT:    movl sc64, %ecx
430; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
431; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
432; I486-NEXT:    jmp .LBB7_1
433; I486-NEXT:  .LBB7_1: # %atomicrmw.start
434; I486-NEXT:    # =>This Inner Loop Header: Depth=1
435; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
436; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
437; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
438; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
439; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
440; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
441; I486-NEXT:    subl %ecx, %esi
442; I486-NEXT:    sbbl %eax, %edx
443; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
444; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
445; I486-NEXT:    jge .LBB7_4
446; I486-NEXT:  # %bb.3: # %atomicrmw.start
447; I486-NEXT:    # in Loop: Header=BB7_1 Depth=1
448; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
449; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
450; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
451; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
452; I486-NEXT:  .LBB7_4: # %atomicrmw.start
453; I486-NEXT:    # in Loop: Header=BB7_1 Depth=1
454; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
455; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
456; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
457; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
458; I486-NEXT:    movl %esi, {{[0-9]+}}(%esp)
459; I486-NEXT:    movl %eax, {{[0-9]+}}(%esp)
460; I486-NEXT:    movl %esp, %eax
461; I486-NEXT:    movl %edx, 12(%eax)
462; I486-NEXT:    movl %ecx, 8(%eax)
463; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
464; I486-NEXT:    movl %ecx, 4(%eax)
465; I486-NEXT:    movl $2, 20(%eax)
466; I486-NEXT:    movl $2, 16(%eax)
467; I486-NEXT:    movl $sc64, (%eax)
468; I486-NEXT:    calll __atomic_compare_exchange_8@PLT
469; I486-NEXT:    movb %al, %dl
470; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
471; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
472; I486-NEXT:    testb %dl, %dl
473; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
474; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
475; I486-NEXT:    je .LBB7_1
476; I486-NEXT:    jmp .LBB7_2
477; I486-NEXT:  .LBB7_2: # %atomicrmw.end
478; I486-NEXT:    leal -4(%ebp), %esp
479; I486-NEXT:    popl %esi
480; I486-NEXT:    popl %ebp
481; I486-NEXT:    retl
482  %t1 = atomicrmw min  ptr @sc64, i64 %x acquire
483
484  ret void
485}
486
; atomicrmw umax (unsigned): same CAS-loop structure as the signed max test,
; but with unsigned condition codes — cmova on X64, jb on I486.
487define void @atomic_fetch_umax64(i64 %x) nounwind {
488; X64-LABEL: atomic_fetch_umax64:
489; X64:       # %bb.0:
490; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
491; X64-NEXT:    movq sc64, %rax
492; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
493; X64-NEXT:  .LBB8_1: # %atomicrmw.start
494; X64-NEXT:    # =>This Inner Loop Header: Depth=1
495; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
496; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
497; X64-NEXT:    movq %rax, %rdx
498; X64-NEXT:    subq %rcx, %rdx
499; X64-NEXT:    cmovaq %rax, %rcx
500; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
501; X64-NEXT:    sete %cl
502; X64-NEXT:    testb $1, %cl
503; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
504; X64-NEXT:    jne .LBB8_2
505; X64-NEXT:    jmp .LBB8_1
506; X64-NEXT:  .LBB8_2: # %atomicrmw.end
507; X64-NEXT:    retq
508;
509; I486-LABEL: atomic_fetch_umax64:
510; I486:       # %bb.0:
511; I486-NEXT:    pushl %ebp
512; I486-NEXT:    movl %esp, %ebp
513; I486-NEXT:    pushl %esi
514; I486-NEXT:    andl $-8, %esp
515; I486-NEXT:    subl $72, %esp
516; I486-NEXT:    movl 12(%ebp), %eax
517; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
518; I486-NEXT:    movl 8(%ebp), %eax
519; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
520; I486-NEXT:    movl sc64+4, %eax
521; I486-NEXT:    movl sc64, %ecx
522; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
523; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
524; I486-NEXT:    jmp .LBB8_1
525; I486-NEXT:  .LBB8_1: # %atomicrmw.start
526; I486-NEXT:    # =>This Inner Loop Header: Depth=1
527; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
528; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
529; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
530; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
531; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
532; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
533; I486-NEXT:    subl %ecx, %esi
534; I486-NEXT:    sbbl %eax, %edx
535; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
536; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
537; I486-NEXT:    jb .LBB8_4
538; I486-NEXT:  # %bb.3: # %atomicrmw.start
539; I486-NEXT:    # in Loop: Header=BB8_1 Depth=1
540; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
541; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
542; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
543; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
544; I486-NEXT:  .LBB8_4: # %atomicrmw.start
545; I486-NEXT:    # in Loop: Header=BB8_1 Depth=1
546; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
547; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
548; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
549; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
550; I486-NEXT:    movl %esi, {{[0-9]+}}(%esp)
551; I486-NEXT:    movl %eax, {{[0-9]+}}(%esp)
552; I486-NEXT:    movl %esp, %eax
553; I486-NEXT:    movl %edx, 12(%eax)
554; I486-NEXT:    movl %ecx, 8(%eax)
555; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
556; I486-NEXT:    movl %ecx, 4(%eax)
557; I486-NEXT:    movl $2, 20(%eax)
558; I486-NEXT:    movl $2, 16(%eax)
559; I486-NEXT:    movl $sc64, (%eax)
560; I486-NEXT:    calll __atomic_compare_exchange_8@PLT
561; I486-NEXT:    movb %al, %dl
562; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
563; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
564; I486-NEXT:    testb %dl, %dl
565; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
566; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
567; I486-NEXT:    je .LBB8_1
568; I486-NEXT:    jmp .LBB8_2
569; I486-NEXT:  .LBB8_2: # %atomicrmw.end
570; I486-NEXT:    leal -4(%ebp), %esp
571; I486-NEXT:    popl %esi
572; I486-NEXT:    popl %ebp
573; I486-NEXT:    retl
574  %t1 = atomicrmw umax ptr @sc64, i64 %x acquire
575
576  ret void
577}
578
; atomicrmw umin (unsigned): the fourth variant of the min/max CAS loop —
; cmovbe on X64, jae on I486 — keeping the smaller value by unsigned compare.
579define void @atomic_fetch_umin64(i64 %x) nounwind {
580; X64-LABEL: atomic_fetch_umin64:
581; X64:       # %bb.0:
582; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
583; X64-NEXT:    movq sc64, %rax
584; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
585; X64-NEXT:  .LBB9_1: # %atomicrmw.start
586; X64-NEXT:    # =>This Inner Loop Header: Depth=1
587; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
588; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
589; X64-NEXT:    movq %rax, %rdx
590; X64-NEXT:    subq %rcx, %rdx
591; X64-NEXT:    cmovbeq %rax, %rcx
592; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
593; X64-NEXT:    sete %cl
594; X64-NEXT:    testb $1, %cl
595; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
596; X64-NEXT:    jne .LBB9_2
597; X64-NEXT:    jmp .LBB9_1
598; X64-NEXT:  .LBB9_2: # %atomicrmw.end
599; X64-NEXT:    retq
600;
601; I486-LABEL: atomic_fetch_umin64:
602; I486:       # %bb.0:
603; I486-NEXT:    pushl %ebp
604; I486-NEXT:    movl %esp, %ebp
605; I486-NEXT:    pushl %esi
606; I486-NEXT:    andl $-8, %esp
607; I486-NEXT:    subl $72, %esp
608; I486-NEXT:    movl 12(%ebp), %eax
609; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
610; I486-NEXT:    movl 8(%ebp), %eax
611; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
612; I486-NEXT:    movl sc64+4, %eax
613; I486-NEXT:    movl sc64, %ecx
614; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
615; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
616; I486-NEXT:    jmp .LBB9_1
617; I486-NEXT:  .LBB9_1: # %atomicrmw.start
618; I486-NEXT:    # =>This Inner Loop Header: Depth=1
619; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
620; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
621; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
622; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
623; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
624; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
625; I486-NEXT:    subl %ecx, %esi
626; I486-NEXT:    sbbl %eax, %edx
627; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
628; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
629; I486-NEXT:    jae .LBB9_4
630; I486-NEXT:  # %bb.3: # %atomicrmw.start
631; I486-NEXT:    # in Loop: Header=BB9_1 Depth=1
632; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
633; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
634; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
635; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
636; I486-NEXT:  .LBB9_4: # %atomicrmw.start
637; I486-NEXT:    # in Loop: Header=BB9_1 Depth=1
638; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
639; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
640; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
641; I486-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
642; I486-NEXT:    movl %esi, {{[0-9]+}}(%esp)
643; I486-NEXT:    movl %eax, {{[0-9]+}}(%esp)
644; I486-NEXT:    movl %esp, %eax
645; I486-NEXT:    movl %edx, 12(%eax)
646; I486-NEXT:    movl %ecx, 8(%eax)
647; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
648; I486-NEXT:    movl %ecx, 4(%eax)
649; I486-NEXT:    movl $2, 20(%eax)
650; I486-NEXT:    movl $2, 16(%eax)
651; I486-NEXT:    movl $sc64, (%eax)
652; I486-NEXT:    calll __atomic_compare_exchange_8@PLT
653; I486-NEXT:    movb %al, %dl
654; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
655; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
656; I486-NEXT:    testb %dl, %dl
657; I486-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
658; I486-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
659; I486-NEXT:    je .LBB9_1
660; I486-NEXT:    jmp .LBB9_2
661; I486-NEXT:  .LBB9_2: # %atomicrmw.end
662; I486-NEXT:    leal -4(%ebp), %esp
663; I486-NEXT:    popl %esi
664; I486-NEXT:    popl %ebp
665; I486-NEXT:    retl
666  %t1 = atomicrmw umin ptr @sc64, i64 %x acquire
667
668  ret void
669}
670
; cmpxchg i64 0 -> 1: X64 emits a single lock cmpxchgq with the expected
; value in rax; I486 calls __atomic_compare_exchange_8, passing a stack slot
; for the expected value and $2/$2 (acquire/acquire) ordering arguments.
671define void @atomic_fetch_cmpxchg64() nounwind {
672; X64-LABEL: atomic_fetch_cmpxchg64:
673; X64:       # %bb.0:
674; X64-NEXT:    xorl %eax, %eax
675; X64-NEXT:    # kill: def $rax killed $eax
676; X64-NEXT:    movl $1, %ecx
677; X64-NEXT:    lock cmpxchgq %rcx, sc64(%rip)
678; X64-NEXT:    retq
679;
680; I486-LABEL: atomic_fetch_cmpxchg64:
681; I486:       # %bb.0:
682; I486-NEXT:    pushl %ebp
683; I486-NEXT:    movl %esp, %ebp
684; I486-NEXT:    andl $-8, %esp
685; I486-NEXT:    subl $32, %esp
686; I486-NEXT:    movl $0, {{[0-9]+}}(%esp)
687; I486-NEXT:    movl $0, {{[0-9]+}}(%esp)
688; I486-NEXT:    movl %esp, %eax
689; I486-NEXT:    leal {{[0-9]+}}(%esp), %ecx
690; I486-NEXT:    movl %ecx, 4(%eax)
691; I486-NEXT:    movl $2, 20(%eax)
692; I486-NEXT:    movl $2, 16(%eax)
693; I486-NEXT:    movl $0, 12(%eax)
694; I486-NEXT:    movl $1, 8(%eax)
695; I486-NEXT:    movl $sc64, (%eax)
696; I486-NEXT:    calll __atomic_compare_exchange_8@PLT
697; I486-NEXT:    movl %ebp, %esp
698; I486-NEXT:    popl %ebp
699; I486-NEXT:    retl
700  %t1 = cmpxchg ptr @sc64, i64 0, i64 1 acquire acquire
701  ret void
702}
703
; Atomic 64-bit store with release ordering: a plain movq suffices on X64
; (release stores need no fence on x86); I486 calls __atomic_store_8 with
; ordering argument $3, matching the IR's release ordering.
704define void @atomic_fetch_store64(i64 %x) nounwind {
705; X64-LABEL: atomic_fetch_store64:
706; X64:       # %bb.0:
707; X64-NEXT:    movq %rdi, sc64(%rip)
708; X64-NEXT:    retq
709;
710; I486-LABEL: atomic_fetch_store64:
711; I486:       # %bb.0:
712; I486-NEXT:    subl $16, %esp
713; I486-NEXT:    movl {{[0-9]+}}(%esp), %edx
714; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
715; I486-NEXT:    movl %esp, %eax
716; I486-NEXT:    movl %edx, 8(%eax)
717; I486-NEXT:    movl %ecx, 4(%eax)
718; I486-NEXT:    movl $3, 12(%eax)
719; I486-NEXT:    movl $sc64, (%eax)
720; I486-NEXT:    calll __atomic_store_8@PLT
721; I486-NEXT:    addl $16, %esp
722; I486-NEXT:    retl
723  store atomic i64 %x, ptr @sc64 release, align 8
724  ret void
725}
726
; atomicrmw xchg on i64: xchgq (implicitly locked) on X64; __atomic_exchange_8
; libcall on I486.
727define void @atomic_fetch_swap64(i64 %x) nounwind {
728; X64-LABEL: atomic_fetch_swap64:
729; X64:       # %bb.0:
730; X64-NEXT:    xchgq %rdi, sc64(%rip)
731; X64-NEXT:    retq
732;
733; I486-LABEL: atomic_fetch_swap64:
734; I486:       # %bb.0:
735; I486-NEXT:    subl $16, %esp
736; I486-NEXT:    movl {{[0-9]+}}(%esp), %edx
737; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
738; I486-NEXT:    movl %esp, %eax
739; I486-NEXT:    movl %edx, 8(%eax)
740; I486-NEXT:    movl %ecx, 4(%eax)
741; I486-NEXT:    movl $2, 12(%eax)
742; I486-NEXT:    movl $sc64, (%eax)
743; I486-NEXT:    calll __atomic_exchange_8@PLT
744; I486-NEXT:    addl $16, %esp
745; I486-NEXT:    retl
746  %t1 = atomicrmw xchg ptr @sc64, i64 %x acquire
747  ret void
748}
749
; atomicrmw xchg on double: the FP value is moved to an integer register
; (movq %xmm0, %rax) and swapped with xchgq on X64; I486 spills via x87
; fldl/fstpl and calls __atomic_exchange_8 on @fsc64.
750define void @atomic_fetch_swapf64(double %x) nounwind {
751; X64-LABEL: atomic_fetch_swapf64:
752; X64:       # %bb.0:
753; X64-NEXT:    movq %xmm0, %rax
754; X64-NEXT:    xchgq %rax, fsc64(%rip)
755; X64-NEXT:    retq
756;
757; I486-LABEL: atomic_fetch_swapf64:
758; I486:       # %bb.0:
759; I486-NEXT:    pushl %ebp
760; I486-NEXT:    movl %esp, %ebp
761; I486-NEXT:    andl $-8, %esp
762; I486-NEXT:    subl $24, %esp
763; I486-NEXT:    fldl 8(%ebp)
764; I486-NEXT:    fstpl {{[0-9]+}}(%esp)
765; I486-NEXT:    movl {{[0-9]+}}(%esp), %ecx
766; I486-NEXT:    movl {{[0-9]+}}(%esp), %edx
767; I486-NEXT:    movl %esp, %eax
768; I486-NEXT:    movl %edx, 8(%eax)
769; I486-NEXT:    movl %ecx, 4(%eax)
770; I486-NEXT:    movl $2, 12(%eax)
771; I486-NEXT:    movl $fsc64, (%eax)
772; I486-NEXT:    calll __atomic_exchange_8@PLT
773; I486-NEXT:    movl %ebp, %esp
774; I486-NEXT:    popl %ebp
775; I486-NEXT:    retl
776  %t1 = atomicrmw xchg ptr @fsc64, double %x acquire
777  ret void
778}
779
; atomicrmw xchg on a pointer: unlike the i64 cases, a pointer is 4 bytes on
; i386, so I486 needs no libcall — a plain xchgl suffices; X64 uses xchgq.
780define void @atomic_fetch_swapptr(ptr %x) nounwind {
781; X64-LABEL: atomic_fetch_swapptr:
782; X64:       # %bb.0:
783; X64-NEXT:    xchgq %rdi, psc64(%rip)
784; X64-NEXT:    retq
785;
786; I486-LABEL: atomic_fetch_swapptr:
787; I486:       # %bb.0:
788; I486-NEXT:    movl {{[0-9]+}}(%esp), %eax
789; I486-NEXT:    xchgl %eax, psc64
790; I486-NEXT:    retl
791  %t1 = atomicrmw xchg ptr @psc64, ptr %x acquire
792  ret void
793}
794