// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[CAP_MAIN_TY:%.+]] = type { [2 x i{{[0-9]+}}]*, i{{[0-9]+}}*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]* }
// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK: [[CAP_TMAIN_TY:%.+]] = type { [2 x i{{[0-9]+}}]*, i{{[0-9]+}}*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]* }
// CHECK: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
#pragma omp parallel private(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: [[G_LOCAL_REF:%.+]] = getelementptr inbounds %{{.+}}* [[AGG_CAPTURED:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
  // LAMBDA: store i{{[0-9]+}}* [[G]], i{{[0-9]+}}** [[G_LOCAL_REF]]
  // LAMBDA: [[ARG:%.+]] = bitcast %{{.+}}* [[AGG_CAPTURED]] to i8*
  // LAMBDA: call void {{.+}}* @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[ARG]])
#pragma omp parallel private(g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // LAMBDA: call i32 @__kmpc_cancel_barrier(
    g = 1;
    // LAMBDA: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8*
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: [[G_LOCAL_REF:%.+]] = getelementptr inbounds %{{.+}}* [[AGG_CAPTURED:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
  // BLOCKS: store i{{[0-9]+}}* [[G]], i{{[0-9]+}}** [[G_LOCAL_REF]]
  // BLOCKS: [[ARG:%.+]] = bitcast %{{.+}}* [[AGG_CAPTURED]] to i8*
  // BLOCKS: call void {{.+}}* @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* [[ARG]])
#pragma omp parallel private(g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // BLOCKS: call i32 @__kmpc_cancel_barrier(
    g = 1;
    // BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8*
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel private(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: %{{.+}} = bitcast [[CAP_MAIN_TY]]*
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_MAIN_TY]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_MAIN_TY]]* %{{.+}})
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: {{.+}}:
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...)* @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_TMAIN_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_TMAIN_TY]]* %{{.+}})
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: {{.+}}:
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void
#endif
