// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -DOMP5 -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics

#ifndef HEADER
#define HEADER

// Codegen test for OpenMP 5.0 lastprivate(conditional:) when the conditional
// lastprivate variable `a` is also updated inside nested `parallel reduction`,
// `atomic`, and `parallel`+`atomic` regions. The CHECK1 lines below verify
// that each update site is followed by a critical-section guarded
// iteration-count compare-and-store into the `pl_cond` globals, and that the
// final lastprivate copy-out reads the winning value back. Do not edit the
// CHECK lines by hand; regenerate with update_cc_test_checks.py.
int main() {
  int a = 0;
#pragma omp parallel for lastprivate(conditional: a)
  for (int i = 0; i < 10; ++i) {
    if (i < 5) {
      a = 0;
#pragma omp parallel reduction(+:a) num_threads(10)
      a += i;
#pragma omp atomic
      a += i;
#pragma omp parallel num_threads(10)
#pragma omp atomic
      a += i;
    }
  }
  return 0;
}




#endif // HEADER
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK1-NEXT: store i32 0, ptr [[A]], align 4
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2:[0-9]+]], i32 1, ptr @main.omp_outlined, ptr [[A]])
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A1:%.*]] = alloca [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT_LASPRIVATE_CONDITIONAL]], ptr [[A1]], i32 0, i32 1
// CHECK1-NEXT: store i8 0, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT_LASPRIVATE_CONDITIONAL]], ptr [[A1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP11]], 5
// CHECK1-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP12]]
// CHECK1-NEXT: br i1 [[TMP14]], label [[LP_COND_THEN:%.*]], label [[LP_COND_EXIT:%.*]]
// CHECK1: lp_cond_then:
// CHECK1-NEXT: store i32 [[TMP12]], ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP15]], ptr @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT]]
// CHECK1: lp_cond_exit:
// CHECK1-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @main.omp_outlined.omp_outlined, ptr [[TMP2]], ptr [[I]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP18:%.*]] = icmp sle i32 [[TMP17]], [[TMP16]]
// CHECK1-NEXT: br i1 [[TMP18]], label [[LP_COND_THEN4:%.*]], label [[LP_COND_EXIT5:%.*]]
// CHECK1: lp_cond_then4:
// CHECK1-NEXT: store i32 [[TMP16]], ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP19]], ptr @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT5]]
// CHECK1: lp_cond_exit5:
// CHECK1-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP2]], i32 [[TMP20]] monotonic, align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP24:%.*]] = icmp sle i32 [[TMP23]], [[TMP22]]
// CHECK1-NEXT: br i1 [[TMP24]], label [[LP_COND_THEN6:%.*]], label [[LP_COND_EXIT7:%.*]]
// CHECK1: lp_cond_then6:
// CHECK1-NEXT: store i32 [[TMP22]], ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP25]], ptr @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT7]]
// CHECK1: lp_cond_exit7:
// CHECK1-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP4]], i32 10)
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @main.omp_outlined.omp_outlined.1, ptr [[TMP2]], ptr [[I]])
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT_LASPRIVATE_CONDITIONAL]], ptr [[A1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i8, ptr [[TMP26]], align 4
// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i8 [[TMP27]], 0
// CHECK1-NEXT: br i1 [[TMP28]], label [[LPC_THEN:%.*]], label [[LPC_DONE:%.*]]
// CHECK1: lpc.then:
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP31:%.*]] = icmp sle i32 [[TMP30]], [[TMP29]]
// CHECK1-NEXT: br i1 [[TMP31]], label [[LP_COND_THEN8:%.*]], label [[LP_COND_EXIT9:%.*]]
// CHECK1: lp_cond_then8:
// CHECK1-NEXT: store i32 [[TMP29]], ptr @.{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP32]], ptr @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: br label [[LP_COND_EXIT9]]
// CHECK1: lp_cond_exit9:
// CHECK1-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB2]], i32 [[TMP4]], ptr @.gomp_critical_user_{{pl_cond[.].+[.|,]}}var)
// CHECK1-NEXT: br label [[LPC_DONE]]
// CHECK1: lpc.done:
// CHECK1-NEXT: br label [[IF_END]]
// CHECK1: if.end:
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP33]], 1
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP4]])
// CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
// CHECK1: .omp.lastprivate.then:
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr @{{pl_cond[.].+[.|,]}} align 4
// CHECK1-NEXT: store i32 [[TMP36]], ptr [[TMP2]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT: store i32 [[TMP37]], ptr [[TMP0]], align 4
// CHECK1-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
// CHECK1: .omp.lastprivate.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK1-NEXT: store i32 0, ptr [[A1]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[A1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[A1]], ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP6]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @main.omp_outlined.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP7]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A1]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD2]], ptr [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP6]], ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP10]] monotonic, align 4
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.omp_outlined.omp.reduction.reduction_func
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main.omp_outlined.omp_outlined.1
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP2]] monotonic, align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT_LASPRIVATE_CONDITIONAL:%.*]], ptr [[TMP0]], i32 0, i32 1
// CHECK1-NEXT: store atomic volatile i8 1, ptr [[TMP4]] unordered, align 1
// CHECK1-NEXT: ret void
//