; RUN: llc -mtriple=x86_64 < %s | FileCheck %s

; Quick test to ensure that atomics which are not naturally aligned are
; lowered to the unsized __atomic_* libcalls, and aren't emitted as native
; instructions or sized libcalls.
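; (This expansion is expected to be done by the AtomicExpand pass, which
; replaces the misaligned operations with calls to the generic,
; size-parameterized __atomic_* runtime functions.)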
define void @test_i32(ptr %a) nounwind {
; CHECK-LABEL: test_i32:
; CHECK: callq __atomic_load
; CHECK: callq __atomic_store
; CHECK: callq __atomic_exchange
; CHECK: callq __atomic_compare_exchange
; CHECK: callq __atomic_compare_exchange
  %t0 = load atomic i32, ptr %a seq_cst, align 2
  store atomic i32 1, ptr %a seq_cst, align 2
  %t1 = atomicrmw xchg ptr %a, i32 1 seq_cst, align 2
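; There is no unsized libcall for atomicrmw add, so it is presumably expanded
; to a compare-exchange loop; that accounts for the first of the two
; __atomic_compare_exchange calls checked above.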
  %t3 = atomicrmw add ptr %a, i32 2 seq_cst, align 2
  %t2 = cmpxchg ptr %a, i32 0, i32 1 seq_cst seq_cst, align 2
  ret void
}

define void @test_i128(ptr %a) nounwind {
; CHECK-LABEL: test_i128:
; CHECK: callq __atomic_load
; CHECK: callq __atomic_store
; CHECK: callq __atomic_exchange
; CHECK: callq __atomic_compare_exchange
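; Note: align 8 is a power of two but still below i128's natural 16-byte
; alignment, so these operations should also go through the unsized libcalls.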
  %t0 = load atomic i128, ptr %a seq_cst, align 8
  store atomic i128 1, ptr %a seq_cst, align 8
  %t1 = atomicrmw xchg ptr %a, i128 1 seq_cst, align 8
  %t2 = atomicrmw add ptr %a, i128 2 seq_cst, align 8
  %t3 = cmpxchg ptr %a, i128 0, i128 1 seq_cst seq_cst, align 8
  ret void
}
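
; Illustrative companion sketch, not part of the original test: with natural
; alignment the same atomic operations are expected to lower to native
; instructions (plain mov for the load, xchg for the seq_cst store), so no
; __atomic libcalls should appear.
define void @test_i32_aligned(ptr %a) nounwind {
; CHECK-LABEL: test_i32_aligned:
; CHECK-NOT: callq __atomic
  %t0 = load atomic i32, ptr %a seq_cst, align 4
  store atomic i32 1, ptr %a seq_cst, align 4
  ret void
}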