// xref: /llvm-project/clang/test/CodeGen/builtins-arm64.c (revision c5de4dd1eab00df76c1a68c5f397304ceacb71f2)
// RUN: %clang_cc1 -triple arm64-unknown-linux -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LINUX
// RUN: %clang_cc1 -triple aarch64-windows -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-WIN
// RUN: %clang_cc1 -triple aarch64-windows -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-WIN
// RUN: %clang_cc1 -triple arm64_32-apple-ios13 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
#include <stdint.h>
5 
// __clear_cache(start, end) lowers to a call to the __clear_cache runtime helper.
void f0(void *a, void *b) {
	__clear_cache(a,b);
// CHECK: call {{.*}} @__clear_cache
}
10 
// __builtin_thread_pointer lowers to the llvm.thread.pointer intrinsic on Linux.
void *tp (void) {
  return __builtin_thread_pointer ();
// CHECK-LINUX: call {{.*}} @llvm.thread.pointer()
}
15 
// 32-bit bit reversal maps onto llvm.bitreverse.i32.
// CHECK: call {{.*}} @llvm.bitreverse.i32(i32 %a)
unsigned rbit(unsigned a) {
  return __builtin_arm_rbit(a);
}
20 
// 'unsigned long' is 32-bit on Windows (LLP64), so the argument is widened
// before the 64-bit bit-reverse; on Linux (LP64) it is already 64-bit.
// CHECK-WIN: [[A64:%[^ ]+]] = zext i32 %a to i64
// CHECK-WIN: call i64 @llvm.bitreverse.i64(i64 [[A64]])
// CHECK-LINUX: call i64 @llvm.bitreverse.i64(i64 %a)
unsigned long rbitl(unsigned long a) {
  return __builtin_arm_rbit64(a);
}
27 
// 64-bit bit reversal maps onto llvm.bitreverse.i64 on all targets.
// CHECK: call {{.*}} @llvm.bitreverse.i64(i64 %a)
uint64_t rbit64(uint64_t a) {
  return __builtin_arm_rbit64(a);
}
32 
// Each AArch64 hint builtin lowers to llvm.aarch64.hint with its encoding immediate.
void hints(void) {
  __builtin_arm_nop();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 0)
  __builtin_arm_yield();  //CHECK: call {{.*}} @llvm.aarch64.hint(i32 1)
  __builtin_arm_wfe();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 2)
  __builtin_arm_wfi();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 3)
  __builtin_arm_sev();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 4)
  __builtin_arm_sevl();   //CHECK: call {{.*}} @llvm.aarch64.hint(i32 5)
}
41 
// Memory barrier builtins lower to the matching AArch64 intrinsics,
// passing the barrier-option immediate through unchanged.
void barriers(void) {
  __builtin_arm_dmb(1);  //CHECK: call {{.*}} @llvm.aarch64.dmb(i32 1)
  __builtin_arm_dsb(2);  //CHECK: call {{.*}} @llvm.aarch64.dsb(i32 2)
  __builtin_arm_isb(3);  //CHECK: call {{.*}} @llvm.aarch64.isb(i32 3)
}
47 
// __builtin_arm_prefetch(addr, rw, target, isStream, isData) forwards all five
// immediates to llvm.aarch64.prefetch.
void prefetch(void) {
  __builtin_arm_prefetch(0, 1, 2, 0, 1); // pstl3keep
  // CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 1, i32 2, i32 0, i32 1)

  __builtin_arm_prefetch(0, 0, 0, 1, 1); // pldl1keep
  // CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 1, i32 1)

  // NOTE(review): this line's arguments duplicate pldl1keep above despite the
  // "pldl1strm" comment — confirm whether the 4th argument was meant to differ.
  __builtin_arm_prefetch(0, 0, 0, 1, 1); // pldl1strm
  // CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 1, i32 1)

  __builtin_arm_prefetch(0, 0, 0, 0, 0); // plil1keep
  // CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 0, i32 0)

  __builtin_arm_prefetch(0, 0, 3, 0, 1); // pldslckeep
  // CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 3, i32 0, i32 1)
}
64 
65 __attribute__((target("v8.5a")))
jcvt(double v)66 int32_t jcvt(double v) {
67   //CHECK-LABEL: @jcvt(
68   //CHECK: call i32 @llvm.aarch64.fjcvtzs
69   return __builtin_arm_jcvt(v);
70 }
71 
72 __typeof__(__builtin_arm_rsr("1:2:3:4:5")) rsr(void);
73 
rsr(void)74 uint32_t rsr(void) {
75   // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
76   // CHECK-NEXT: trunc i64 [[V0]] to i32
77   return __builtin_arm_rsr("1:2:3:4:5");
78 }
79 
80 __typeof__(__builtin_arm_rsr64("1:2:3:4:5")) rsr64(void);
81 
rsr64(void)82 uint64_t rsr64(void) {
83   // CHECK: call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
84   return __builtin_arm_rsr64("1:2:3:4:5");
85 }
86 
// Pointer-typed system-register read: 64-bit read followed by inttoptr.
void *rsrp(void) {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
  // CHECK-NEXT: inttoptr i64 [[V0]] to ptr
  return __builtin_arm_rsrp("1:2:3:4:5");
}
92 
93 __typeof__(__builtin_arm_wsr("1:2:3:4:5", 0)) wsr(unsigned);
94 
wsr(unsigned v)95 void wsr(unsigned v) {
96   // CHECK: [[V0:[%A-Za-z0-9.]+]] = zext i32 %v to i64
97   // CHECK-NEXT: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 [[V0]])
98   __builtin_arm_wsr("1:2:3:4:5", v);
99 }
100 
101 __typeof__(__builtin_arm_wsr64("1:2:3:4:5", 0)) wsr64(uint64_t);
102 
wsr64(uint64_t v)103 void wsr64(uint64_t v) {
104   // CHECK: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %v)
105   __builtin_arm_wsr64("1:2:3:4:5", v);
106 }
107 
// Pointer-typed system-register write: ptrtoint then a 64-bit register write.
void wsrp(void *v) {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = ptrtoint ptr %v to i64
  // CHECK-NEXT: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 [[V0]])
  __builtin_arm_wsrp("1:2:3:4:5", v);
}
113 
// Count-leading-sign-bits on a 32-bit value maps onto llvm.aarch64.cls.
unsigned int cls(uint32_t v) {
  // CHECK: call i32 @llvm.aarch64.cls(i32 %v)
  return __builtin_arm_cls(v);
}
118 
// 'unsigned long' is 32-bit on Windows (LLP64), so the argument is widened
// before the 64-bit cls; on Linux (LP64) it is already 64-bit.
unsigned int clsl(unsigned long v) {
  // CHECK-WIN: [[V64:%[^ ]+]] = zext i32 %v to i64
  // CHECK-WIN: call i32 @llvm.aarch64.cls64(i64 [[V64]]
  // CHECK-LINUX: call i32 @llvm.aarch64.cls64(i64 %v)
  return __builtin_arm_cls64(v);
}
125 
// Count-leading-sign-bits on a 64-bit value maps onto llvm.aarch64.cls64.
unsigned int clsll(uint64_t v) {
  // CHECK: call i32 @llvm.aarch64.cls64(i64 %v)
  return __builtin_arm_cls64(v);
}
130 
131 // CHECK-LABEL: @rndr(
132 // CHECK-NEXT:  entry:
133 // CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndr()
134 // CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
135 // CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
136 // CHECK-NEXT:    store i64 [[TMP1]], ptr [[__ADDR:%.*]], align 8
137 // CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
138 // CHECK-NEXT:    ret i32 [[TMP3]]
139 //
140 __attribute__((target("rand")))
rndr(uint64_t * __addr)141 int rndr(uint64_t *__addr) {
142   return __builtin_arm_rndr(__addr);
143 }
144 
145 // CHECK-LABEL: @rndrrs(
146 // CHECK-NEXT:  entry:
147 // CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndrrs()
148 // CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
149 // CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
150 // CHECK-NEXT:    store i64 [[TMP1]], ptr [[__ADDR:%.*]], align 8
151 // CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
152 // CHECK-NEXT:    ret i32 [[TMP3]]
153 //
154 __attribute__((target("rand")))
rndrrs(uint64_t * __addr)155 int rndrrs(uint64_t *__addr) {
156   return __builtin_arm_rndrrs(__addr);
157 }
158 
// __builtin_arm_trap lowers to a BRK with the given immediate.
// CHECK-LABEL: @trap(
// CHECK: call void @llvm.aarch64.break(i32 42)
void trap() {
  __builtin_arm_trap(42);
}
164 
// CHECK: ![[M0]] = !{!"1:2:3:4:5"}
166