; xref: /llvm-project/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll (revision fe42e72db29e48aa81eac2aa922afd90a7f01517)
; RUN: opt -S %s -passes=atomic-expand | FileCheck %s

;;; NOTE: this test is actually target-independent -- any target which
;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
;;; work, if LLVM is properly taught about what it's missing vs i586.)

;target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
;target triple = "i386-unknown-unknown"
target datalayout = "e-m:e-p:32:32-i64:64-f128:64-n32-S64"
target triple = "sparc-unknown-unknown"

;; First, check the sized calls. Except for cmpxchg, these are fairly
;; straightforward.
; A sufficiently-aligned i16 load maps to the sized libcall
; __atomic_load_2 (the trailing i32 5 is the seq_cst ordering).
; CHECK-LABEL: @test_load_i16(
; CHECK:  %1 = call i16 @__atomic_load_2(ptr %arg, i32 5)
; CHECK:  ret i16 %1
define i16 @test_load_i16(ptr %arg) {
  %ret = load atomic i16, ptr %arg seq_cst, align 4
  ret i16 %ret
}

; CHECK-LABEL: @test_store_i16(
; CHECK:  call void @__atomic_store_2(ptr %arg, i16 %val, i32 5)
; CHECK:  ret void
define void @test_store_i16(ptr %arg, i16 %val) {
  store atomic i16 %val, ptr %arg seq_cst, align 4
  ret void
}

; CHECK-LABEL: @test_exchange_i16(
; CHECK:  %1 = call i16 @__atomic_exchange_2(ptr %arg, i16 %val, i32 5)
; CHECK:  ret i16 %1
define i16 @test_exchange_i16(ptr %arg, i16 %val) {
  %ret = atomicrmw xchg ptr %arg, i16 %val seq_cst
  ret i16 %ret
}

; The expected value is passed indirectly through a stack temporary
; (which also receives the old value on return); the two trailing
; constants are the success/failure orderings (5 = seq_cst, 0 = monotonic).
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK:  %1 = alloca i16, align 2
; CHECK:  call void @llvm.lifetime.start.p0(i64 2, ptr %1)
; CHECK:  store i16 %old, ptr %1, align 2
; CHECK:  %2 = call zeroext i1 @__atomic_compare_exchange_2(ptr %arg, ptr %1, i16 %new, i32 5, i32 0)
; CHECK:  %3 = load i16, ptr %1, align 2
; CHECK:  call void @llvm.lifetime.end.p0(i64 2, ptr %1)
; CHECK:  %4 = insertvalue { i16, i1 } poison, i16 %3, 0
; CHECK:  %5 = insertvalue { i16, i1 } %4, i1 %2, 1
; CHECK:  %ret = extractvalue { i16, i1 } %5, 0
; CHECK:  ret i16 %ret
define i16 @test_cmpxchg_i16(ptr %arg, i16 %old, i16 %new) {
  %ret_succ = cmpxchg ptr %arg, i16 %old, i16 %new seq_cst monotonic
  %ret = extractvalue { i16, i1 } %ret_succ, 0
  ret i16 %ret
}

; CHECK-LABEL: @test_add_i16(
; CHECK:  %1 = call i16 @__atomic_fetch_add_2(ptr %arg, i16 %val, i32 5)
; CHECK:  ret i16 %1
define i16 @test_add_i16(ptr %arg, i16 %val) {
  %ret = atomicrmw add ptr %arg, i16 %val seq_cst
  ret i16 %ret
}


;; Now, check the output for the unsized libcalls. i128 is used for
;; these tests because the "16" suffixed functions aren't available on
;; 32-bit i386.

; CHECK-LABEL: @test_load_i128(
; CHECK:  %1 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  call void @__atomic_load(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK:  %2 = load i128, ptr %1, align 8
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  ret i128 %2
define i128 @test_load_i128(ptr %arg) {
  %ret = load atomic i128, ptr %arg seq_cst, align 16
  ret i128 %ret
}

; CHECK-LABEL: @test_store_i128(
; CHECK:  %1 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  store i128 %val, ptr %1, align 8
; CHECK:  call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  ret void
define void @test_store_i128(ptr %arg, i128 %val) {
  store atomic i128 %val, ptr %arg seq_cst, align 16
  ret void
}

; CHECK-LABEL: @test_exchange_i128(
; CHECK:  %1 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  store i128 %val, ptr %1, align 8
; CHECK:  %2 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK:  call void @__atomic_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5)
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  %3 = load i128, ptr %2, align 8
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %2)
; CHECK:  ret i128 %3
define i128 @test_exchange_i128(ptr %arg, i128 %val) {
  %ret = atomicrmw xchg ptr %arg, i128 %val seq_cst
  ret i128 %ret
}

; CHECK-LABEL: @test_cmpxchg_i128(
; CHECK:  %1 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  store i128 %old, ptr %1, align 8
; CHECK:  %2 = alloca i128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK:  store i128 %new, ptr %2, align 8
; CHECK:  %3 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 0)
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %2)
; CHECK:  %4 = load i128, ptr %1, align 8
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  %5 = insertvalue { i128, i1 } poison, i128 %4, 0
; CHECK:  %6 = insertvalue { i128, i1 } %5, i1 %3, 1
; CHECK:  %ret = extractvalue { i128, i1 } %6, 0
; CHECK:  ret i128 %ret
define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) {
  %ret_succ = cmpxchg ptr %arg, i128 %old, i128 %new seq_cst monotonic
  %ret = extractvalue { i128, i1 } %ret_succ, 0
  ret i128 %ret
}

; This one is a verbose expansion, as there is no generic
; __atomic_fetch_add function, so it needs to expand to a cmpxchg
; loop, which then itself expands into a libcall.

; CHECK-LABEL: @test_add_i128(
; CHECK:  %1 = alloca i128, align 8
; CHECK:  %2 = alloca i128, align 8
; CHECK:  %3 = load i128, ptr %arg, align 16
; CHECK:  br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK:  %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
; CHECK:  %new = add i128 %loaded, %val
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  store i128 %loaded, ptr %1, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK:  store i128 %new, ptr %2, align 8
; CHECK:  %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %2)
; CHECK:  %5 = load i128, ptr %1, align 8
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  %6 = insertvalue { i128, i1 } poison, i128 %5, 0
; CHECK:  %7 = insertvalue { i128, i1 } %6, i1 %4, 1
; CHECK:  %success = extractvalue { i128, i1 } %7, 1
; CHECK:  %newloaded = extractvalue { i128, i1 } %7, 0
; CHECK:  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK:  ret i128 %newloaded
define i128 @test_add_i128(ptr %arg, i128 %val) {
  %ret = atomicrmw add ptr %arg, i128 %val seq_cst
  ret i128 %ret
}

;; Ensure that non-integer types get bitcast correctly on the way in and out of a libcall:

; CHECK-LABEL: @test_load_double(
; CHECK:  %1 = call i64 @__atomic_load_8(ptr %arg, i32 5)
; CHECK:  %2 = bitcast i64 %1 to double
; CHECK:  ret double %2
define double @test_load_double(ptr %arg, double %val) {
  %1 = load atomic double, ptr %arg seq_cst, align 16
  ret double %1
}

; CHECK-LABEL: @test_store_double(
; CHECK:  %1 = bitcast double %val to i64
; CHECK:  call void @__atomic_store_8(ptr %arg, i64 %1, i32 5)
; CHECK:  ret void
define void @test_store_double(ptr %arg, double %val) {
  store atomic double %val, ptr %arg seq_cst, align 16
  ret void
}

; Pointers are converted to the pointer-sized integer for the sized
; libcall; the failure ordering here is acquire (i32 2).
; CHECK-LABEL: @test_cmpxchg_ptr(
; CHECK:   %1 = alloca ptr, align 4
; CHECK:   call void @llvm.lifetime.start.p0(i64 4, ptr %1)
; CHECK:   store ptr %old, ptr %1, align 4
; CHECK:   %2 = ptrtoint ptr %new to i32
; CHECK:   %3 = call zeroext i1 @__atomic_compare_exchange_4(ptr %arg, ptr %1, i32 %2, i32 5, i32 2)
; CHECK:   %4 = load ptr, ptr %1, align 4
; CHECK:   call void @llvm.lifetime.end.p0(i64 4, ptr %1)
; CHECK:   %5 = insertvalue { ptr, i1 } poison, ptr %4, 0
; CHECK:   %6 = insertvalue { ptr, i1 } %5, i1 %3, 1
; CHECK:   %ret = extractvalue { ptr, i1 } %6, 0
; CHECK:   ret ptr %ret
; CHECK: }
define ptr @test_cmpxchg_ptr(ptr %arg, ptr %old, ptr %new) {
  %ret_succ = cmpxchg ptr %arg, ptr %old, ptr %new seq_cst acquire
  %ret = extractvalue { ptr, i1 } %ret_succ, 0
  ret ptr %ret
}

;; ...and for a non-integer type of large size too.

; CHECK-LABEL: @test_store_fp128
; CHECK:  %1 = alloca fp128, align 8
; CHECK:  call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK:  store fp128 %val, ptr %1, align 8
; CHECK:  call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK:  call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK:  ret void
define void @test_store_fp128(ptr %arg, fp128 %val) {
  store atomic fp128 %val, ptr %arg seq_cst, align 16
  ret void
}

;; Unaligned loads and stores should be expanded to the generic
;; libcall, just like large loads/stores, and not a specialized one.
;; NOTE: atomicrmw and cmpxchg don't yet support an align attribute;
;; when such support is added, they should also be tested here.

; CHECK-LABEL: @test_unaligned_load_i16(
; CHECK:  __atomic_load(
define i16 @test_unaligned_load_i16(ptr %arg) {
  %ret = load atomic i16, ptr %arg seq_cst, align 1
  ret i16 %ret
}

; CHECK-LABEL: @test_unaligned_store_i16(
; CHECK: __atomic_store(
define void @test_unaligned_store_i16(ptr %arg, i16 %val) {
  store atomic i16 %val, ptr %arg seq_cst, align 1
  ret void
}
