// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --prefix-filecheck-ir-name _ --version 5
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -fopenmp-version=52 -x c -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics

// Codegen regression test for the OpenMP 5.2 combined construct
// 'parallel masked taskloop': verifies the outlined parallel region calls
// __kmpc_masked, wraps the task in a taskgroup, and lowers the loop through
// __kmpc_omp_task_alloc/__kmpc_taskloop with bounds [0, 99] and stride 1
// (derived from the N=100 iteration loop below).
#define N 100
void parallel_masked_taskloop(){
#pragma omp parallel masked taskloop
  for( int i = 0; i < N; i++)
    ;

}

int main()
{
  parallel_masked_taskloop();
}
// CHECK-LABEL: define dso_local void @parallel_masked_taskloop(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 0, ptr @parallel_masked_taskloop.omp_outlined)
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define internal void @parallel_masked_taskloop.omp_outlined(
// CHECK-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_masked(ptr @[[GLOB1]], i32 [[TMP1]], i32 0)
// CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
// CHECK-NEXT:    br i1 [[TMP3]], label %[[OMP_IF_THEN:.*]], label %[[OMP_IF_END:.*]]
// CHECK:       [[OMP_IF_THEN]]:
// CHECK-NEXT:    call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT:    [[TMP4:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 0, ptr @.omp_task_entry.)
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP5]], i32 0, i32 5
// CHECK-NEXT:    store i64 0, ptr [[TMP6]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 6
// CHECK-NEXT:    store i64 99, ptr [[TMP7]], align 8
// CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 7
// CHECK-NEXT:    store i64 1, ptr [[TMP8]], align 8
// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 9
// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
// CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr [[TMP8]], align 8
// CHECK-NEXT:    call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP4]], i32 1, ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP10]], i32 1, i32 0, i64 0, ptr null)
// CHECK-NEXT:    call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT:    call void @__kmpc_end_masked(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT:    br label %[[OMP_IF_END]]
// CHECK:       [[OMP_IF_END]]:
// CHECK-NEXT:    ret void
//
// CHECK-LABEL: define dso_local i32 @main(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT:  [[ENTRY:.*:]]
// CHECK-NEXT:    call void @parallel_masked_taskloop()
// CHECK-NEXT:    ret i32 0
//