// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

void fn1();
void fn2();
void fn3();
void fn4();
void fn5();
void fn6();

int Arg;

void gtid_test() {
#pragma omp target teams loop
  for(int i = 0 ; i < 100; i++) {}

#pragma omp target teams loop if (target: false)
  for(int i = 0 ; i < 100; i++) {
    gtid_test();
  }
}


template <typename T>
int tmain(T Arg) {
#pragma omp target teams loop if (true)
  for(int i = 0 ; i < 100; i++) {
    fn1();
  }
#pragma omp target teams loop if (false)
  for(int i = 0 ; i < 100; i++) {
    fn2();
  }
#pragma omp target teams loop if (target: Arg)
  for(int i = 0 ; i < 100; i++) {
    fn3();
  }
  return 0;
}

int main() {
#pragma omp target teams loop if (true)
  for(int i = 0 ; i < 100; i++) {


    fn4();
  }

#pragma omp target teams loop if (false)
  for(int i = 0 ; i < 100; i++) {


    fn5();
  }

#pragma omp target teams loop if (Arg)
  for(int i = 0 ; i < 100; i++) {


    fn6();
  }

  return tmain(Arg);
}






// call void [[T_OUTLINE_FUN_3:@.+]](

#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT:    store i32 3, ptr [[TMP0]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT:    store i32 0, ptr [[TMP1]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT:    store ptr null, ptr [[TMP2]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT:    store ptr null, ptr [[TMP3]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT:    store ptr null, ptr [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT:    store ptr null, ptr [[TMP5]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT:    store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT:    store ptr null, ptr [[TMP7]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT:    store i64 100, ptr [[TMP8]], align 8
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT:    store i64 0, ptr [[TMP9]], align 8
// CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT:    store i32 0, ptr [[TMP12]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT:    br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2:[0-9]+]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l51() #[[ATTR2]]
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.omp_outlined)
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK1-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]])
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK1-NEXT:    store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.omp_outlined.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK1-NEXT:    [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK1-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK1-NEXT:    store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK1-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], ptr [[I]], align 4
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT:    store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l51
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l51.omp_outlined)
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l51.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], ptr [[I]], align 4
// CHECK1-NEXT:    call void @_Z9gtid_testv()
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT:    store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
// CHECK1-NEXT:    [[_TMP4:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT:    store i32 0, ptr [[RETVAL]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT:    store i32 3, ptr [[TMP0]], align 4
// CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT:    store i32 0, ptr [[TMP1]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT:    store ptr null, ptr [[TMP2]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT:    store ptr null, ptr [[TMP3]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT:    store ptr null, ptr [[TMP4]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT:    store ptr null, ptr [[TMP5]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT:    store ptr null, ptr [[TMP6]], align 8
// CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT:    store ptr null, ptr [[TMP7]], align 8
// CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT:    store i64 100, ptr [[TMP8]], align 8
// CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK1-NEXT:    store i64 0, ptr [[TMP9]], align 8
// CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK1-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
// CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK1-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK1-NEXT:    store i32 0, ptr [[TMP12]], align 4
// CHECK1-NEXT:    [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l76.region_id, ptr [[KERNEL_ARGS]])
// CHECK1-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT:    br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1:       omp_offload.failed:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l76() #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT]]
// CHECK1:       omp_offload.cont:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83() #[[ATTR2]]
// CHECK1-NEXT:    [[TMP15:%.*]] = load i32, ptr @Arg, align 4
// CHECK1-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK1-NEXT:    [[STOREDV:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK1-NEXT:    store i8 [[STOREDV]], ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT:    [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP16]] to i1
// CHECK1-NEXT:    [[STOREDV1:%.*]] = zext i1 [[LOADEDV]] to i8
// CHECK1-NEXT:    store i8 [[STOREDV1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
// CHECK1-NEXT:    [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT:    [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT:    [[LOADEDV2:%.*]] = trunc i8 [[TMP18]] to i1
// CHECK1-NEXT:    br i1 [[LOADEDV2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1:       omp_if.then:
// CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    store i64 [[TMP17]], ptr [[TMP19]], align 8
// CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    store i64 [[TMP17]], ptr [[TMP20]], align 8
// CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT:    store ptr null, ptr [[TMP21]], align 8
// CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT:    [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT:    [[LOADEDV3:%.*]] = trunc i8 [[TMP24]] to i1
// CHECK1-NEXT:    [[TMP25:%.*]] = select i1 [[LOADEDV3]], i32 0, i32 1
// CHECK1-NEXT:    [[TMP26:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP25]], 0
// CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
// CHECK1-NEXT:    store i32 3, ptr [[TMP27]], align 4
// CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
// CHECK1-NEXT:    store i32 1, ptr [[TMP28]], align 4
// CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
// CHECK1-NEXT:    store ptr [[TMP22]], ptr [[TMP29]], align 8
// CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
// CHECK1-NEXT:    store ptr [[TMP23]], ptr [[TMP30]], align 8
// CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
// CHECK1-NEXT:    store ptr @.offload_sizes, ptr [[TMP31]], align 8
// CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
// CHECK1-NEXT:    store ptr @.offload_maptypes, ptr [[TMP32]], align 8
// CHECK1-NEXT:    [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
// CHECK1-NEXT:    store ptr null, ptr [[TMP33]], align 8
// CHECK1-NEXT:    [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
// CHECK1-NEXT:    store ptr null, ptr [[TMP34]], align 8
// CHECK1-NEXT:    [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
// CHECK1-NEXT:    store i64 100, ptr [[TMP35]], align 8
// CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
// CHECK1-NEXT:    store i64 0, ptr [[TMP36]], align 8
// CHECK1-NEXT:    [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
// CHECK1-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP37]], align 4
// CHECK1-NEXT:    [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
// CHECK1-NEXT:    store [3 x i32] [[TMP26]], ptr [[TMP38]], align 4
// CHECK1-NEXT:    [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
// CHECK1-NEXT:    store i32 0, ptr [[TMP39]], align 4
// CHECK1-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP25]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, ptr [[KERNEL_ARGS5]])
// CHECK1-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
// CHECK1-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
// CHECK1:       omp_offload.failed6:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90(i64 [[TMP17]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_OFFLOAD_CONT7]]
// CHECK1:       omp_offload.cont7:
// CHECK1-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK1:       omp_if.else:
// CHECK1-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90(i64 [[TMP17]]) #[[ATTR2]]
// CHECK1-NEXT:    br label [[OMP_IF_END]]
// CHECK1:       omp_if.end:
// CHECK1-NEXT:    [[TMP42:%.*]] = load i32, ptr @Arg, align 4
// CHECK1-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP42]])
// CHECK1-NEXT:    ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l76
// CHECK1-SAME: () #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l76.omp_outlined)
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l76.omp_outlined
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK1-NEXT:    store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
503 // CHECK1-NEXT: br label [[COND_END:%.*]] 504 // CHECK1: cond.false: 505 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 506 // CHECK1-NEXT: br label [[COND_END]] 507 // CHECK1: cond.end: 508 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 509 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 510 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 511 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 512 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 513 // CHECK1: omp.inner.for.cond: 514 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 515 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 516 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 517 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 518 // CHECK1: omp.inner.for.body: 519 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 520 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 521 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 522 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 523 // CHECK1-NEXT: call void @_Z3fn4v() 524 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 525 // CHECK1: omp.body.continue: 526 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 527 // CHECK1: omp.inner.for.inc: 528 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 529 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 530 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 531 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 532 // CHECK1: omp.inner.for.end: 533 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 534 // CHECK1: omp.loop.exit: 535 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 536 // CHECK1-NEXT: ret void 537 // 538 // 539 // CHECK1-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 540 // CHECK1-SAME: () #[[ATTR1]] { 541 // CHECK1-NEXT: entry: 542 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.omp_outlined) 543 // CHECK1-NEXT: ret void 544 // 545 // 546 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.omp_outlined 547 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 548 // CHECK1-NEXT: entry: 549 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 550 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 551 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 552 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 553 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 554 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 555 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 556 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 557 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 558 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 559 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 560 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 561 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 562 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 563 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 564 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 565 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 566 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 567 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 
4 568 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 569 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 570 // CHECK1: cond.true: 571 // CHECK1-NEXT: br label [[COND_END:%.*]] 572 // CHECK1: cond.false: 573 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 574 // CHECK1-NEXT: br label [[COND_END]] 575 // CHECK1: cond.end: 576 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 577 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 578 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 579 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 580 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 581 // CHECK1: omp.inner.for.cond: 582 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 583 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 584 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 585 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 586 // CHECK1: omp.inner.for.body: 587 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 588 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 589 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 590 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 591 // CHECK1-NEXT: call void @_Z3fn5v() 592 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 593 // CHECK1: omp.body.continue: 594 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 595 // CHECK1: omp.inner.for.inc: 596 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 597 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 598 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 599 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 600 // CHECK1: omp.inner.for.end: 601 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 602 // CHECK1: omp.loop.exit: 603 // CHECK1-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 604 // CHECK1-NEXT: ret void 605 // 606 // 607 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 608 // CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 609 // CHECK1-NEXT: entry: 610 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 611 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 612 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 613 // CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 614 // CHECK1-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP0]] to i1 615 // CHECK1-NEXT: [[STOREDV:%.*]] = zext i1 [[LOADEDV]] to i8 616 // CHECK1-NEXT: store i8 [[STOREDV]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 617 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 618 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.omp_outlined, i64 [[TMP1]]) 619 // CHECK1-NEXT: ret void 620 // 621 // 622 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.omp_outlined 623 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 624 // CHECK1-NEXT: entry: 625 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 626 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 627 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 628 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 629 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 630 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 631 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 632 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 633 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 634 
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 635 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 636 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 637 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 638 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 639 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 640 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 641 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 642 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 643 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 644 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 645 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 646 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 647 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 648 // CHECK1: cond.true: 649 // CHECK1-NEXT: br label [[COND_END:%.*]] 650 // CHECK1: cond.false: 651 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 652 // CHECK1-NEXT: br label [[COND_END]] 653 // CHECK1: cond.end: 654 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 655 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 656 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 657 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 658 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 659 // CHECK1: omp.inner.for.cond: 660 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 661 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 662 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], 
[[TMP6]] 663 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 664 // CHECK1: omp.inner.for.body: 665 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 666 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 667 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 668 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 669 // CHECK1-NEXT: call void @_Z3fn6v() 670 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 671 // CHECK1: omp.body.continue: 672 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 673 // CHECK1: omp.inner.for.inc: 674 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 675 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 676 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 677 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 678 // CHECK1: omp.inner.for.end: 679 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 680 // CHECK1: omp.loop.exit: 681 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 682 // CHECK1-NEXT: ret void 683 // 684 // 685 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 686 // CHECK1-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 687 // CHECK1-NEXT: entry: 688 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 689 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 690 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 691 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 692 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 693 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 694 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 695 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 696 // CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 697 // CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 698 // CHECK1-NEXT: store i32 
[[ARG]], ptr [[ARG_ADDR]], align 4 699 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 700 // CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 701 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 702 // CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 703 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 704 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 705 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 706 // CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 707 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 708 // CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 709 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 710 // CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 711 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 712 // CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 713 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 714 // CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 715 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 716 // CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 717 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 718 // CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 719 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 720 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 721 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 722 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 723 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 724 // CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 725 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l60.region_id, ptr [[KERNEL_ARGS]]) 726 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 727 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 728 // CHECK1: omp_offload.failed: 729 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l60() #[[ATTR2]] 730 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 731 // CHECK1: omp_offload.cont: 732 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l64() #[[ATTR2]] 733 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 734 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0 735 // CHECK1-NEXT: [[STOREDV:%.*]] = zext i1 [[TOBOOL]] to i8 736 // CHECK1-NEXT: store i8 [[STOREDV]], ptr [[DOTCAPTURE_EXPR_]], align 1 737 // CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 738 // CHECK1-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP16]] to i1 739 // CHECK1-NEXT: [[STOREDV1:%.*]] = zext i1 [[LOADEDV]] to i8 740 // CHECK1-NEXT: store i8 [[STOREDV1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 741 // CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 742 // CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 743 // CHECK1-NEXT: 
[[LOADEDV2:%.*]] = trunc i8 [[TMP18]] to i1 744 // CHECK1-NEXT: br i1 [[LOADEDV2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 745 // CHECK1: omp_if.then: 746 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 747 // CHECK1-NEXT: store i64 [[TMP17]], ptr [[TMP19]], align 8 748 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 749 // CHECK1-NEXT: store i64 [[TMP17]], ptr [[TMP20]], align 8 750 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 751 // CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 752 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 753 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 754 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 0 755 // CHECK1-NEXT: store i32 3, ptr [[TMP24]], align 4 756 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 1 757 // CHECK1-NEXT: store i32 1, ptr [[TMP25]], align 4 758 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 2 759 // CHECK1-NEXT: store ptr [[TMP22]], ptr [[TMP26]], align 8 760 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 3 761 // CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 762 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 4 763 // CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP28]], align 8 764 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], 
i32 0, i32 5 765 // CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP29]], align 8 766 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 6 767 // CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 768 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 7 769 // CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8 770 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 8 771 // CHECK1-NEXT: store i64 100, ptr [[TMP32]], align 8 772 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 9 773 // CHECK1-NEXT: store i64 0, ptr [[TMP33]], align 8 774 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 10 775 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4 776 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 11 777 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 778 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS4]], i32 0, i32 12 779 // CHECK1-NEXT: store i32 0, ptr [[TMP36]], align 4 780 // CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68.region_id, ptr [[KERNEL_ARGS4]]) 781 // CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 782 // CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 783 // CHECK1: omp_offload.failed5: 784 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68(i64 [[TMP17]]) #[[ATTR2]] 785 // 
CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]] 786 // CHECK1: omp_offload.cont6: 787 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 788 // CHECK1: omp_if.else: 789 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68(i64 [[TMP17]]) #[[ATTR2]] 790 // CHECK1-NEXT: br label [[OMP_IF_END]] 791 // CHECK1: omp_if.end: 792 // CHECK1-NEXT: ret i32 0 793 // 794 // 795 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l60 796 // CHECK1-SAME: () #[[ATTR1]] { 797 // CHECK1-NEXT: entry: 798 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l60.omp_outlined) 799 // CHECK1-NEXT: ret void 800 // 801 // 802 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l60.omp_outlined 803 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 804 // CHECK1-NEXT: entry: 805 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 806 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 807 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 808 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 809 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 810 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 811 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 812 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 813 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 814 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 815 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 816 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 817 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 818 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 819 // CHECK1-NEXT: store i32 0, ptr 
[[DOTOMP_IS_LAST]], align 4 820 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 821 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 822 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 823 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 824 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 825 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 826 // CHECK1: cond.true: 827 // CHECK1-NEXT: br label [[COND_END:%.*]] 828 // CHECK1: cond.false: 829 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 830 // CHECK1-NEXT: br label [[COND_END]] 831 // CHECK1: cond.end: 832 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 833 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 834 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 835 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 836 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 837 // CHECK1: omp.inner.for.cond: 838 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 839 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 840 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 841 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 842 // CHECK1: omp.inner.for.body: 843 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 844 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 845 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 846 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 847 // CHECK1-NEXT: call void @_Z3fn1v() 848 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 849 // CHECK1: omp.body.continue: 850 // CHECK1-NEXT: br 
label [[OMP_INNER_FOR_INC:%.*]] 851 // CHECK1: omp.inner.for.inc: 852 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 853 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 854 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 855 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 856 // CHECK1: omp.inner.for.end: 857 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 858 // CHECK1: omp.loop.exit: 859 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 860 // CHECK1-NEXT: ret void 861 // 862 // 863 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l64 864 // CHECK1-SAME: () #[[ATTR1]] { 865 // CHECK1-NEXT: entry: 866 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l64.omp_outlined) 867 // CHECK1-NEXT: ret void 868 // 869 // 870 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l64.omp_outlined 871 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 872 // CHECK1-NEXT: entry: 873 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 874 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 875 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 876 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 877 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 878 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 879 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 880 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 881 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 882 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 883 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 884 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 885 
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 886 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 887 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 888 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 889 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 890 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 891 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 892 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 893 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 894 // CHECK1: cond.true: 895 // CHECK1-NEXT: br label [[COND_END:%.*]] 896 // CHECK1: cond.false: 897 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 898 // CHECK1-NEXT: br label [[COND_END]] 899 // CHECK1: cond.end: 900 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 901 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 902 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 903 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 904 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 905 // CHECK1: omp.inner.for.cond: 906 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 907 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 908 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 909 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 910 // CHECK1: omp.inner.for.body: 911 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 912 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 913 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 914 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], 
align 4 915 // CHECK1-NEXT: call void @_Z3fn2v() 916 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 917 // CHECK1: omp.body.continue: 918 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 919 // CHECK1: omp.inner.for.inc: 920 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 921 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 922 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 923 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 924 // CHECK1: omp.inner.for.end: 925 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 926 // CHECK1: omp.loop.exit: 927 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 928 // CHECK1-NEXT: ret void 929 // 930 // 931 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68 932 // CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 933 // CHECK1-NEXT: entry: 934 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 935 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 936 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 937 // CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 938 // CHECK1-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP0]] to i1 939 // CHECK1-NEXT: [[STOREDV:%.*]] = zext i1 [[LOADEDV]] to i8 940 // CHECK1-NEXT: store i8 [[STOREDV]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 941 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 942 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68.omp_outlined, i64 [[TMP1]]) 943 // CHECK1-NEXT: ret void 944 // 945 // 946 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l68.omp_outlined 947 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 948 // CHECK1-NEXT: entry: 949 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 950 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 951 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 952 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 953 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 954 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 955 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 956 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 957 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 958 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 959 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 960 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 961 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 962 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 963 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 964 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 965 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 966 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 967 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 968 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 969 // 
CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 970 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 971 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 972 // CHECK1: cond.true: 973 // CHECK1-NEXT: br label [[COND_END:%.*]] 974 // CHECK1: cond.false: 975 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 976 // CHECK1-NEXT: br label [[COND_END]] 977 // CHECK1: cond.end: 978 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 979 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 980 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 981 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 982 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 983 // CHECK1: omp.inner.for.cond: 984 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 985 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 986 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 987 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 988 // CHECK1: omp.inner.for.body: 989 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 990 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 991 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 992 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 993 // CHECK1-NEXT: call void @_Z3fn3v() 994 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 995 // CHECK1: omp.body.continue: 996 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 997 // CHECK1: omp.inner.for.inc: 998 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 999 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 1000 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 1001 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 1002 // CHECK1: omp.inner.for.end: 1003 // CHECK1-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] 1004 // CHECK1: omp.loop.exit: 1005 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 1006 // CHECK1-NEXT: ret void 1007 // 1008