// xref: /llvm-project/clang/test/CodeGen/PowerPC/builtins-ppc-xlcompat-fetch.c (revision 39db5e1ed87363a9ffea81e53520b542201b3262)
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -triple powerpc64-unknown-linux-gnu \
// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu \
// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s

// __fetch_and_add on int must lower to a monotonic 32-bit atomicrmw add.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_add(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add ptr [[A_ADDR]], i32 [[TMP0]] monotonic, align 4
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_add(int a, int b) {
  __fetch_and_add(&a, b);
}
// __fetch_and_addlp on long must lower to a monotonic 64-bit atomicrmw add.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_addlp(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add ptr [[A_ADDR]], i64 [[TMP0]] monotonic, align 8
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_addlp(long a, long b) {
  __fetch_and_addlp(&a, b);
}
// __fetch_and_and on unsigned int must lower to a monotonic 32-bit atomicrmw and.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_and(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A_ADDR]], i32 [[TMP0]] monotonic, align 4
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_and(unsigned int a, unsigned int b) {
  __fetch_and_and(&a, b);
}
// __fetch_and_andlp on unsigned long must lower to a monotonic 64-bit atomicrmw and.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_andlp(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A_ADDR]], i64 [[TMP0]] monotonic, align 8
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_andlp(unsigned long a, unsigned long b) {
  __fetch_and_andlp(&a, b);
}
// __fetch_and_or on unsigned int must lower to a monotonic 32-bit atomicrmw or.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_or(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A_ADDR]], i32 [[TMP0]] monotonic, align 4
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_or(unsigned int a, unsigned int b) {
  __fetch_and_or(&a, b);
}
// __fetch_and_orlp on unsigned long must lower to a monotonic 64-bit atomicrmw or.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_orlp(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A_ADDR]], i64 [[TMP0]] monotonic, align 8
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_orlp(unsigned long a, unsigned long b) {
  __fetch_and_orlp(&a, b);
}
// __fetch_and_swap on unsigned int must lower to a monotonic 32-bit atomicrmw xchg.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_swap(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B_ADDR]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[A_ADDR]], i32 [[TMP0]] monotonic, align 4
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_swap(unsigned int a, unsigned int b) {
  __fetch_and_swap(&a, b);
}
// __fetch_and_swaplp on unsigned long must lower to a monotonic 64-bit atomicrmw xchg.
// CHECK-LABEL: @test_builtin_ppc_fetch_and_swaplp(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[A_ADDR]], i64 [[TMP0]] monotonic, align 8
// CHECK-NEXT:    ret void
//
void test_builtin_ppc_fetch_and_swaplp(unsigned long a, unsigned long b) {
  __fetch_and_swaplp(&a, b);
}