; RUN: llvm-as < %s | llvm-dis > %t.orig
; RUN: llvm-as < %s | llvm-c-test --echo > %t.echo
; RUN: diff -w %t.orig %t.echo
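; The echo test rebuilds the module through the LLVM-C API; diffing against
; the llvm-dis output verifies that every atomic construct below round-trips
; through the C bindings unchanged (-w ignores whitespace differences).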


define void @fence_instrs() {
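  ; fence accepts only the four orderings at least as strong as acquire:
  ; acquire, release, acq_rel, and seq_cst. Unordered and monotonic
  ; fences are not valid IR.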
  fence acquire
  fence release
  fence acq_rel
  fence seq_cst

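  ; The same orderings with a single-thread synchronization scope: these
  ; fences synchronize only with other operations running on the same
  ; thread (e.g. signal handlers).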
  fence syncscope("singlethread") acquire
  fence syncscope("singlethread") release
  fence syncscope("singlethread") acq_rel
  fence syncscope("singlethread") seq_cst

  ret void
}

define void @atomic_load_store(ptr %word) {
  ; Test different atomic loads
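  ; (valid load orderings: unordered, monotonic, acquire, seq_cst;
  ; release and acq_rel apply only to stores)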
  %ld.1 = load atomic i32, ptr %word monotonic, align 4
  %ld.2 = load atomic volatile i32, ptr %word acquire, align 4
  %ld.3 = load atomic volatile i32, ptr %word seq_cst, align 4
  %ld.4 = load atomic volatile i32, ptr %word syncscope("singlethread") acquire, align 4
  %ld.5 = load atomic volatile i32, ptr %word syncscope("singlethread") seq_cst, align 4
  %ld.6 = load atomic i32, ptr %word syncscope("singlethread") seq_cst, align 4

  ; Test different atomic stores
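  ; (valid store orderings: unordered, monotonic, release, seq_cst;
  ; acquire and acq_rel apply only to loads)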
  store atomic i32 1, ptr %word monotonic, align 4
  store atomic volatile i32 2, ptr %word release, align 4
  store atomic volatile i32 3, ptr %word seq_cst, align 4
  store atomic volatile i32 4, ptr %word syncscope("singlethread") release, align 4
  store atomic volatile i32 5, ptr %word syncscope("singlethread") seq_cst, align 4
  store atomic i32 6, ptr %word syncscope("singlethread") seq_cst, align 4
  ret void
}

define void @atomic_rmw_ops(ptr %p, i32 %i, float %f) {
  ; Test all atomicrmw operations
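  ; Each atomicrmw atomically loads the value at %p, applies the named
  ; operation with the second operand, stores the result back, and yields
  ; the original value.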
  %a.xchg      = atomicrmw xchg      ptr %p, i32 %i acq_rel, align 8
  %a.add       = atomicrmw add       ptr %p, i32 %i acq_rel, align 8
  %a.sub       = atomicrmw sub       ptr %p, i32 %i acq_rel, align 8
  %a.and       = atomicrmw and       ptr %p, i32 %i acq_rel, align 8
  %a.nand      = atomicrmw nand      ptr %p, i32 %i acq_rel, align 8
  %a.or        = atomicrmw or        ptr %p, i32 %i acq_rel, align 8
  %a.xor       = atomicrmw xor       ptr %p, i32 %i acq_rel, align 8
  %a.max       = atomicrmw max       ptr %p, i32 %i acq_rel, align 8
  %a.min       = atomicrmw min       ptr %p, i32 %i acq_rel, align 8
  %a.umax      = atomicrmw umax      ptr %p, i32 %i acq_rel, align 8
  %a.umin      = atomicrmw umin      ptr %p, i32 %i acq_rel, align 8

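  ; Floating-point read-modify-write operations.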
  %a.fadd      = atomicrmw fadd      ptr %p, float %f acq_rel, align 8
  %a.fsub      = atomicrmw fsub      ptr %p, float %f acq_rel, align 8
  %a.fmax      = atomicrmw fmax      ptr %p, float %f acq_rel, align 8
  %a.fmin      = atomicrmw fmin      ptr %p, float %f acq_rel, align 8

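  ; Unsigned increment/decrement that wrap around relative to the second
  ; operand.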
  %a.uinc_wrap = atomicrmw uinc_wrap ptr %p, i32 %i acq_rel, align 8
  %a.udec_wrap = atomicrmw udec_wrap ptr %p, i32 %i acq_rel, align 8

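  ; Saturating (clamp at zero) and conditional unsigned subtraction.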
  %a.usub_sat  = atomicrmw usub_sat  ptr %p, i32 %i acq_rel, align 8
  %a.usub_cond = atomicrmw usub_cond ptr %p, i32 %i acq_rel, align 8

  ret void
}

define i32 @main() {
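  ; cmpxchg takes two orderings: seq_cst on success and acquire on failure
  ; (the failure ordering may not be release or acq_rel). It returns a
  ; { i32, i1 } pair of the loaded value and a success flag.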
  %1 = alloca i32, align 4
  %2 = cmpxchg ptr %1, i32 2, i32 3 seq_cst acquire
  %3 = extractvalue { i32, i1 } %2, 0
  ret i32 %3
}