// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - -femit-all-decls | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - -femit-all-decls | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// Codegen test for '#pragma omp taskloop simd': verifies the emitted
// __kmpc_omp_task_alloc/__kmpc_taskloop runtime calls (including taskgroup
// wrapping, grainsize/num_tasks/if/priority clause lowering), the generated
// task entry functions (TASK1-TASK4), the llvm.mem.parallel_loop_access /
// llvm.loop metadata placement, and the vectorize width metadata produced by
// simdlen/safelen.

// CHECK-LABEL: @main
int main(int argc, char **argv) {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[DEFLOC:@.+]])
// CHECK: call void @__kmpc_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 33, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK1:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 9, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 0, i64 0, i8* null)
// CHECK: call void @__kmpc_end_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
#pragma omp taskloop simd priority(argc)
  for (int i = 0; i < 10; ++i)
    ;
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 1, i64 80, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK2:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 9, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[GRAINSIZE:%.+]] = zext i32 %{{.+}} to i64
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 1, i64 [[GRAINSIZE]], i8* null)
#pragma omp taskloop simd nogroup grainsize(argc) simdlen(4)
  for (int i = 0; i < 10; ++i)
    ;
// CHECK: call void @__kmpc_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 1, i64 80, i64 24, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK3:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[IF:%.+]] = icmp ne i32 %{{.+}}, 0
// CHECK: [[IF_INT:%.+]] = sext i1 [[IF]] to i32
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 %{{.+}}, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 [[IF_INT]], i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 2, i64 4, i8* null)
// CHECK: call void @__kmpc_end_taskgroup(%struct.ident_t* [[DEFLOC]], i32 [[GTID]])
  int i;
#pragma omp taskloop simd if(argc) shared(argc, argv) collapse(2) num_tasks(4) safelen(32)
  for (i = 0; i < argc; ++i)
    for (int j = argc; j < argv[argc][argc]; ++j)
      ;
}

// CHECK: define internal i32 [[TASK1]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP1:!.+]]
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
// CHECK: store i32 %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP1]]
// CHECK: br label %{{.*}}!llvm.loop [[LOOP1]]
// CHECK: ret i32 0

// CHECK: define internal i32 [[TASK2]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP2:!.+]]
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
// CHECK: store i32 %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
// CHECK: load i32, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %{{.*}}!llvm.mem.parallel_loop_access [[LOOP2]]
// CHECK: br label %{{.*}}!llvm.loop [[LOOP2]]
// CHECK: ret i32 0

// CHECK: define internal i32 [[TASK3]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[CNT:%.+]],
// CHECK: br label
// CHECK-NOT: !llvm.mem.parallel_loop_access
// CHECK: br label %{{.*}}!llvm.loop
// CHECK: ret i32 0

// CHECK-LABEL: @_ZN1SC2Ei
struct S {
  int a;
  S(int c) {
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[DEFLOC:@.+]])
// CHECK: [[TASKV:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i32 1, i64 80, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, [[TDP_TY:%.+]]*)* [[TASK4:@.+]] to i32 (i32, i8*)*))
// CHECK: [[TASK:%.+]] = bitcast i8* [[TASKV]] to [[TDP_TY]]*
// CHECK: [[TASK_DATA:%.+]] = getelementptr inbounds [[TDP_TY]], [[TDP_TY]]* [[TASK]], i32 0, i32 0
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 5
// CHECK: store i64 0, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 6
// CHECK: store i64 %{{.+}}, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* [[TASK_DATA]], i32 0, i32 7
// CHECK: store i64 1, i64* [[ST]],
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[NUM_TASKS:%.+]] = zext i32 %{{.+}} to i64
// CHECK: call void @__kmpc_taskloop(%struct.ident_t* [[DEFLOC]], i32 [[GTID]], i8* [[TASKV]], i32 1, i64* [[DOWN]], i64* [[UP]], i64 [[ST_VAL]], i32 1, i32 2, i64 [[NUM_TASKS]], i8* null)
#pragma omp taskloop simd shared(c) num_tasks(a) simdlen(8) safelen(64)
    for (a = 0; a < c; ++a)
      ;
  }
} s(1);

// CHECK: define internal i32 [[TASK4]](
// CHECK: [[DOWN:%.+]] = getelementptr inbounds [[TD_TY:%.+]], [[TD_TY]]* %{{.+}}, i32 0, i32 5
// CHECK: [[DOWN_VAL:%.+]] = load i64, i64* [[DOWN]],
// CHECK: [[UP:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 6
// CHECK: [[UP_VAL:%.+]] = load i64, i64* [[UP]],
// CHECK: [[ST:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 7
// CHECK: [[ST_VAL:%.+]] = load i64, i64* [[ST]],
// CHECK: [[LITER:%.+]] = getelementptr inbounds [[TD_TY]], [[TD_TY]]* %{{.+}}, i32 0, i32 8
// CHECK: [[LITER_VAL:%.+]] = load i32, i32* [[LITER]],
// CHECK: store i64 [[DOWN_VAL]], i64* [[LB:%[^,]+]],
// CHECK: store i64 [[UP_VAL]], i64* [[UB:%[^,]+]],
// CHECK: store i64 [[ST_VAL]], i64* [[ST:%[^,]+]],
// CHECK: store i32 [[LITER_VAL]], i32* [[LITER:%[^,]+]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: [[LB_I32:%.+]] = trunc i64 [[LB_VAL]] to i32
// CHECK: store i32 [[LB_I32]], i32* [[CNT:%.+]],
// CHECK: br label
// CHECK: [[VAL:%.+]] = load i32, i32* [[CNT]],
// CHECK: [[VAL_I64:%.+]] = sext i32 [[VAL]] to i64
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ule i64 [[VAL_I64]], [[UB_VAL]]
// CHECK: br i1 [[CMP]], label %{{.+}}, label %{{.+}}
// CHECK: load i32, i32* %
// CHECK-NOT: !llvm.mem.parallel_loop_access
// CHECK: store i32 %
// CHECK-NOT: !llvm.mem.parallel_loop_access
// CHECK: load i32, i32* %
// CHECK-NOT: !llvm.mem.parallel_loop_access
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i32 %{{.+}}, i32* %
// CHECK-NOT: !llvm.mem.parallel_loop_access
// CHECK: br label %{{.*}}!llvm.loop
// CHECK: ret i32 0

// CHECK: !{!"llvm.loop.vectorize.enable", i1 true}
// CHECK: !{!"llvm.loop.vectorize.width", i32 4}
// CHECK: !{!"llvm.loop.vectorize.width", i32 32}
// CHECK: !{!"llvm.loop.vectorize.width", i32 8}

#endif