// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// REQUIRES: powerpc-registered-target

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// Exercises codegen for the combined 'target teams loop' construct with the
// order(concurrent) clause. The CHECK1 lines below (autogenerated; do not
// hand-edit — rerun utils/update_cc_test_checks.py instead) verify the host
// offload launch via @__tgt_target_kernel plus the outlined teams/parallel
// bodies (@__kmpc_fork_teams, @__kmpc_fork_call) with static worksharing
// (@__kmpc_for_static_init_4 / @__kmpc_for_static_fini).
void gtid_test() {
#pragma omp target teams loop order(concurrent)
  for(int i = 0 ; i < 100; i++) {}
}




#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16() #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.omp_outlined)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l16.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]]
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]]
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
// CHECK1-NEXT: ret void
//